| code (stringlengths 82-53.2k) | code_codestyle (int64 0-721) | style_context (stringlengths 91-41.9k) | style_context_codestyle (int64 0-699) | label (int64 0-1) |
|---|---|---|---|---|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether to make the check strict (raise errors) or not (print warnings)."
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
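# Example invocation (added sketch, not part of the original script; the flags follow
# the argparse definitions above, and the SavedModel path is a hypothetical example):
#
#   python path/to/this_script.py --saved_model_path ./saved_model.pb --opset 12 --strict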
| 403 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
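# Added usage sketch (not part of the original module); assumes an installed
# `transformers` package and access to the "squeezebert/squeezebert-uncased"
# checkpoint listed in PRETRAINED_VOCAB_FILES_MAP above:
#
#   from transformers import SqueezeBertTokenizerFast
#
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   tok.build_inputs_with_special_tokens([5, 6], [7, 8])      # [CLS] A [SEP] B [SEP] as ids
#   tok.create_token_type_ids_from_sequences([5, 6], [7, 8])  # [0, 0, 0, 0, 1, 1, 1]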
| 403 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
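# Added usage sketch (not part of the original module); assumes an installed
# `transformers`. The defaults mirror the __init__ signature above, and the two
# properties expose the names declared in `attribute_map`:
#
#   from transformers import TableTransformerConfig
#
#   config = TableTransformerConfig()
#   assert config.hidden_size == config.d_model == 256
#   assert config.num_attention_heads == config.encoder_attention_heads == 8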
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
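# Added usage sketch (not part of the original module); assumes an installed
# `transformers`. With the default `depths=[3, 4, 6, 3]`, four stages are derived:
#
#   from transformers import BitConfig
#
#   config = BitConfig()
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]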
| 282 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        all_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
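# Added usage sketch (not part of the original metric file); it mirrors the doctest
# in _KWARGS_DESCRIPTION above and assumes an installed `datasets` that can load
# this metric script. `words` is a list of CoNLL-formatted lines as shown there:
#
#   import datasets
#
#   coval = datasets.load_metric("coval")
#   results = coval.compute(predictions=[words], references=[words])
#   print(results["conll_score"])  # 100.0 when predictions equal references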
| 672 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
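# Added usage sketch (not part of the original test file); assumes an installed
# `diffusers` with GPU access, mirroring the integration test above:
#
#   from diffusers import VersatileDiffusionImageVariationPipeline
#   from diffusers.utils.testing_utils import load_image
#
#   pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion").to("cuda")
#   init_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
#   image = pipe(image=init_image, guidance_scale=7.5, num_inference_steps=50).images[0]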
| 672 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
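# Added usage sketch (not part of the original test file); assumes an installed
# `diffusers` with GPU access. `sag_scale` enables self-attention guidance, the
# behavior exercised by the tests above; 0.75 is an illustrative value:
#
#   from diffusers import StableDiffusionSAGPipeline
#
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#   image = pipe("a photo of an astronaut", sag_scale=0.75).images[0]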
| 446 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
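# Added usage sketch (not part of the original module); assumes an installed
# `transformers`. Instantiating with no arguments reproduces the defaults above:
#
#   from transformers import GPTNeoXJapaneseConfig
#
#   config = GPTNeoXJapaneseConfig()
#   assert config.hidden_size == 2560 and config.num_hidden_layers == 32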
| 446 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
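# Added usage sketch (not part of the original test file); condenses the integration
# test above and assumes access to the "facebook/mbart-large-en-ro" checkpoint:
#
#   from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
#   batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
#   ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   print(tokenizer.batch_decode(ids, skip_special_tokens=True))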
| 23 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
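# Added usage sketch (not part of the original test file); condenses the image
# classification integration test above. The checkpoint name comes from the test;
# "cat.png" is a hypothetical local image:
#
#   import torch
#   from PIL import Image
#   from transformers import MobileViTForImageClassification, MobileViTImageProcessor
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
#   model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[int(logits.argmax(-1))])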
| 582 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
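# Added usage note (not part of the original file): with the _LazyModule registration
# above, submodules are imported only on first attribute access, so configuration
# classes stay cheap to import even when torch is not installed:
#
#   from transformers.models.megatron_bert import MegatronBertConfig   # no torch import
#   from transformers.models.megatron_bert import MegatronBertModel    # triggers modeling import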
| 704 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False, ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample )
| 625 | 0 |
"""simple docstring"""
def circle_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using the circle sort algorithm."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Return True if any swap was performed on collection[low:high + 1]."""
        swapped = False
        if low == high:
            return swapped

        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
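# Sanity check (a minimal sketch, assuming the reconstructed function above):
# circle_sort([4, 1, 3, 2])  # -> [1, 2, 3, 4]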
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 506 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 303 | 0 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n            Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n            Kern, Robert and Larson, Eric and Carey, C J and\n            Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n            Harris, Charles R. and Archibald, Anne M. and\n            Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n            Computing in Python}},\n journal = {Nature Methods},\n year    = {2020},\n volume  = {17},\n pages   = {261--272},\n adsurl  = {https://rdcu.be/b08Wh},\n doi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(predictions, references)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 133 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num via the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
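# Sanity check (a minimal sketch, assuming the sieve above):
# prime_sieve(10)  # -> [2, 3, 5, 7]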
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 133 | 1 |
'''simple docstring'''
def solution(n: int = 2000000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10)."""
    primality_list = [0 for i in range(n + 1)]  # 0 marks "prime", 1 marks "composite"
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
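# Sanity check (sketch): solution(10) -> 17, i.e. 2 + 3 + 5 + 7.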
if __name__ == "__main__":
print(f'{solution() = }')
| 399 |
'''simple docstring'''
import math
def proth(number: int) -> int:
    """Return the number-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Each "block" of Proth numbers shares the same power-of-two term 2 ** (block + 1)
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
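# Sanity check (sketch): proth(3) -> 9 and proth(4) -> 13.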
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
| 399 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 721 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of input_list, sorted ascending; [] for an empty list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
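# Sanity check (sketch): mode([2, 3, 4, 5, 3, 4, 2]) -> [2, 3, 4].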
if __name__ == "__main__":
import doctest
doctest.testmod()
| 548 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 88 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 333 | 0 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid: np.ndarray, source: tuple[int, int], destination: tuple[int, int], allow_diagonal: bool, ) -> tuple[float, list[tuple[int, int]]]:
    """Return the shortest distance and path between source and destination on a binary grid (1 = free cell)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
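# A minimal usage sketch (hypothetical 3x3 grid; 1 = walkable, 0 = wall):
# grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
# dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
# -> (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])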
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 662 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 349 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer category.
    This way we can load its weights with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 349 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 390 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number) )
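# Sanity check (sketch): digits_fifth_powers_sum(4150) == 4150, since 4^5 + 1^5 + 5^5 + 0^5 = 4150.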
if __name__ == "__main__":
print(solution())
| 390 | 1 |
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
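# Sketch: on a machine with two visible CUDA devices this prints "Successfully ran on 2 GPUs".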
if __name__ == "__main__":
main()
| 278 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 67 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
def lowercase_ ( self ) -> Any:
a : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a : Dict = self.get_inputs(__UpperCAmelCase )
a : Union[str, Any] = ldmad_pipe(**__UpperCAmelCase )
a , a : Optional[Any] = output.rgb, output.depth
a : Dict = 0.419_4127
a : Union[str, Any] = 0.3537_5586
a : Tuple = 0.563_8502
a : str = 0.3468_6103
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 509 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave ( tf.Module ):
        """simple docstring"""
        def __init__( self , tokenizer ):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPT2LMHeadModel.from_config(config )
        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
        def serving ( self , text ):
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.int32 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['logits']
            return outputs
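    # Wrapping `serving` in tf.function with a string TensorSpec lets the tokenizer
    # and the model be exported together as one SavedModel signature (exercised in
    # the saved-model test further down).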
@require_tf
@require_keras_nlp
class A_ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> List[str]:
super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowercase_ ( self ) -> str:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
@slow
def lowercase_ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowercase_ ( self ) -> Optional[Any]:
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs ) # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model , save_path , signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
                # The loaded model is compiled; in practice its outputs match the eager run exactly, so strict equality is checked
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowercase_ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs ) # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowercase_ ( self ) -> int:
for tf_tokenizer in self.tf_tokenizers:
            # give the tokenizer a model_max_length large enough for the test to run
            tf_tokenizer.model_max_length = 12_31_23
            for max_length in [3, 5, 10_24]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 509 | 1 |
from __future__ import annotations
def lowercase__( ciphertext , cipher_alphabet = None , frequencies_dict = None , case_sensitive = False , ):
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(9_7 , 1_2_3 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'a': 0.08_497,
'b': 0.01_492,
'c': 0.02_202,
'd': 0.04_253,
'e': 0.11_162,
'f': 0.02_228,
'g': 0.02_015,
'h': 0.06_094,
'i': 0.07_546,
'j': 0.00_153,
'k': 0.01_292,
'l': 0.04_025,
'm': 0.02_406,
'n': 0.06_749,
'o': 0.07_507,
'p': 0.01_929,
'q': 0.00_095,
'r': 0.07_587,
's': 0.06_327,
't': 0.09_356,
'u': 0.02_758,
'v': 0.00_978,
'w': 0.02_560,
'x': 0.00_150,
'y': 0.01_994,
'z': 0.00_077,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected number of times the letter should appear based
                    # on the letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected number of times the letter should appear based
                    # on the letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
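if __name__ == "__main__":
    # Illustrative run of the decoder above; the ciphertext is hypothetical
    # ("stand and decipher" Caesar-shifted by 3).
    shift, chi_value, decoded = lowercase__('vwdqg dqg ghflskhu' )
    print(F"most likely shift: {shift}, chi-squared: {chi_value:.3f}, decoded: {decoded!r}" )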
| 170 |
from __future__ import annotations
def lowercase__( A ):
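    """Return True when every element of the sequence is distinct.

    >>> lowercase__([1, 2, 3])
    True
    >>> lowercase__([1, 2, 2])
    False
    """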
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
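# `_LazyModule` (wired up at the bottom of this file) defers the torch-backed
# imports above until a module attribute is first accessed.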
A ={'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A =[
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
A =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 719 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module (module ):
    # disable gradient updates for every parameter of the module
    for param in module.parameters():
        param.requires_grad = False
def get_device ():
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def show_image (image ):
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp ():
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
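# Minimal usage sketch for the helpers above (the `model` object is hypothetical):
# device = get_device()
# freeze_module(model.text_encoder)  # e.g. freeze a sub-module before fine-tuning
# print(f"run started at {get_timestamp()} on {device}")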
| 358 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : str = 'upernet'
    def __init__(self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def a__ (self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
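# Usage sketch for the config above (named `__A` here; upstream this is
# `UperNetConfig`):
# config = __A(auxiliary_channels=128)   # defaults to a ResNet backbone
# print(config.a__()['''model_type'''])  # the to_dict-style serializer -> '''upernet'''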
| 11 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = DanceDiffusionPipeline
_lowerCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
_lowerCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_lowerCAmelCase = False
_lowerCAmelCase = False
def a ( self ):
torch.manual_seed(0 )
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=A_ , use_timestep_embedding=A_ , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
_UpperCamelCase = IPNDMScheduler()
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
}
return components
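    # The tiny UNet1D + IPNDM scheduler above keep these fast tests cheap; real
    # checkpoints are exercised in the @slow class further down.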
def a ( self , A_ , A_=0 ):
if str(A_ ).startswith("mps" ):
_UpperCamelCase = torch.manual_seed(A_ )
else:
_UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
_UpperCamelCase = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def a ( self ):
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = DanceDiffusionPipeline(**A_ )
_UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = self.get_dummy_inputs(A_ )
_UpperCamelCase = pipe(**A_ )
_UpperCamelCase = output.audios
_UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCamelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def a ( self ):
return super().test_save_load_local()
@skip_mps
def a ( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def a ( self ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self ):
return super().test_attention_slicing_forward_pass()
def a ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
_UpperCamelCase = torch_device
_UpperCamelCase = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
_UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(generator=A_ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
_UpperCamelCase = output.audios
_UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCamelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self ):
_UpperCamelCase = torch_device
_UpperCamelCase = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(generator=A_ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
_UpperCamelCase = output.audios
_UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCamelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 138 | 0 |
'''simple docstring'''
def A (n :int = 100 ):
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
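# Closed forms used above: sum(i^2) = n(n+1)(2n+1)/6 and (sum i)^2 = (n(n+1)/2)^2
# (Project Euler problem 6). Brute-force cross-check for n = 10:
# A(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)) == 2640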
if __name__ == "__main__":
print(F"""{solution() = }""")
| 162 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def A (grid : np.ndarray , source : tuple[int, int] , destination : tuple[int, int] , allow_diagonal : bool , ):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source ) # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
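    # Small illustrative run (grid values: 1 = passable, 0 = blocked):
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]] )
    dist, path = A(demo_grid , (0, 0) , (2, 0) , allow_diagonal=False )
    print(f"shortest distance: {dist}, path: {path}" )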
| 162 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase ="""▁"""
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = BertGenerationTokenizer
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = True
def UpperCamelCase__ ( self ):
super().setUp()
lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """<s>"""
lowerCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__magic_name__ ) , 1_0_0_2 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase__ ( self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """Hello World!"""
lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : str = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase : str = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@require_torch
@slow
def UpperCamelCase__ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCamelCase : Dict = """ """.join(__magic_name__ )
lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : Tuple = BertGenerationConfig()
lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__magic_name__ )
model(**__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 681 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A__ :
# setable values
    num_inference_steps: Optional[int] = None
    schedule: Optional[jnp.ndarray] = None # sigma(t_i)
    timesteps: Optional[jnp.ndarray] = None
@classmethod
    def create ( cls ):
return cls()
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : KarrasVeSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
@property
    def has_state ( self ):
return True
@register_to_config
    def __init__( self , sigma_min = 0.02 , sigma_max = 1_0_0 , s_noise = 1.007 , s_churn = 8_0 , s_min = 0.05 , s_max = 5_0 , ):
pass
    def create_state ( self ):
return KarrasVeSchedulerState.create()
    def set_timesteps ( self , state , num_inference_steps , shape = () ):
        timesteps = jnp.arange(0 , num_inference_steps )[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps , schedule=jnp.array(schedule , dtype=jnp.float32 ) , timesteps=timesteps , )
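    # `add_noise_to_input` below implements the stochastic "churn" step of Karras
    # et al. (2022): while sigma lies inside the configured [s_min, s_max] band,
    # the noise level is raised to sigma_hat = sigma * (1 + gamma) and matching
    # Gaussian noise is mixed into the sample.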
    def add_noise_to_input ( self , state , sample , sigma , key , ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key , num=1 )
        eps = self.config.s_noise * random.normal(key=key , shape=sample.shape )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
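    # `step` below is the first-order (Euler) update: reconstruct the denoised
    # sample from the model output, form the derivative
    # d = (sample_hat - pred_original_sample) / sigma_hat, then move the sample
    # from sigma_hat to sigma_prev along d; `step_correct` afterwards is the
    # Heun-style second-order correction that averages the two derivatives.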
    def step ( self , state , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
    def step_correct ( self , state , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
    def add_noise ( self , state , original_samples , noise , timesteps ):
raise NotImplementedError()
| 681 | 1 |
"""simple docstring"""
def sum_of_digits ( n : int ) -> int:
    """Iteratively sum the decimal digits of abs(n)."""
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 1_0
        n //= 1_0
    return res
def sum_of_digits_recursion ( n : int ) -> int:
    """Recursively sum the decimal digits of abs(n)."""
    n = abs(n )
    return n if n < 1_0 else n % 1_0 + sum_of_digits_recursion(n // 1_0 )
def sum_of_digits_compact ( n : int ) -> int:
    """Sum the decimal digits of abs(n) via a string round-trip."""
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark ( ):
    """Time the three digit-sum implementations on a few sample inputs."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = F'{func.__name__}({value})'
        timing = timeit(F'__main__.{call}' , setup="""import __main__""" )
        print(F'{call:56} = {func(value )} -- {timing:.4f} seconds' )
    for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 714 |
"""simple docstring"""
def miller_rabin ( n : int , allow_probable : bool = False ) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
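    # Each bounds[i] is the smallest composite that fools the strong-pseudoprime
    # test for the first i+1 prime bases, so testing against those bases is
    # deterministic for n below it; entries of 1 are placeholders where adding
    # the next base does not raise the bound.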
    bounds = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
    primes = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
    for idx, _p in enumerate(bounds , 1 ):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 0 |
def solution (n : int = 60_08_51_47_51_43 ) -> int:
    '''simple docstring'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )
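# Plain trial division: every factor i is divided out completely before i
# advances, so the final `ans` (or a leftover n > 1) is the largest prime
# factor; for the default Project Euler input 600851475143 the answer is 6857.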
if __name__ == "__main__":
print(F'''{solution() = }''')
| 668 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCAmelCase_ :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str]=13 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Any=99 , SCREAMING_SNAKE_CASE_ : int=[1, 1, 2] , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : List[str]=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : int=37 , SCREAMING_SNAKE_CASE_ : str="gelu_new" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=False , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , ):
lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , ):
lowerCAmelCase__ = TFFunnelForPreTraining(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , ):
lowerCAmelCase__ = TFFunnelForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , ):
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
UpperCamelCase_ :Tuple = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ :Optional[int] = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ :Dict = False
UpperCamelCase_ :Tuple = False
def __snake_case ( self : int ):
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : str ):
self.config_tester.run_common_tests()
def __snake_case ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
@require_tf
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
UpperCamelCase_ :str = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase_ :Optional[Any] = False
UpperCamelCase_ :Any = False
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = TFFunnelModelTester(self , base=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Any ):
self.config_tester.run_common_tests()
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
| 668 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
lowercase : str = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Union[str, Any] = 'autoformer'
A : Optional[int] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "student_t" , _SCREAMING_SNAKE_CASE = "nll" , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = [1, 2, 3, 4, 5, 6, 7] , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 64 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE = 10 , _SCREAMING_SNAKE_CASE = 25 , _SCREAMING_SNAKE_CASE = 3 , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
# time series specific configuration
snake_case_ : Dict = prediction_length
snake_case_ : str = context_length if context_length is not None else prediction_length
snake_case_ : int = distribution_output
snake_case_ : Any = loss
snake_case_ : Union[str, Any] = input_size
snake_case_ : Any = num_time_features
snake_case_ : List[Any] = lags_sequence
snake_case_ : List[Any] = scaling
snake_case_ : str = num_dynamic_real_features
snake_case_ : Union[str, Any] = num_static_real_features
snake_case_ : int = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
snake_case_ : Tuple = cardinality
else:
snake_case_ : Optional[Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
snake_case_ : str = embedding_dimension
else:
snake_case_ : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ : Optional[int] = num_parallel_samples
# Transformer architecture configuration
snake_case_ : List[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ : Any = d_model
snake_case_ : List[str] = encoder_attention_heads
snake_case_ : str = decoder_attention_heads
snake_case_ : List[str] = encoder_ffn_dim
snake_case_ : Tuple = decoder_ffn_dim
snake_case_ : List[str] = encoder_layers
snake_case_ : List[str] = decoder_layers
snake_case_ : str = dropout
snake_case_ : Dict = attention_dropout
snake_case_ : str = activation_dropout
snake_case_ : Tuple = encoder_layerdrop
snake_case_ : int = decoder_layerdrop
snake_case_ : Optional[int] = activation_function
snake_case_ : Optional[Any] = init_std
snake_case_ : List[str] = use_cache
# Autoformer
snake_case_ : List[str] = label_length
snake_case_ : int = moving_average
snake_case_ : str = autocorrelation_factor
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
    def _number_of_features ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 114 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Any = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Optional[int] = 'sew-d'
def __init__( self , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=("p2c", "c2p") , _SCREAMING_SNAKE_CASE="layer_norm" , _SCREAMING_SNAKE_CASE="gelu_python" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-7 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE="group" , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="mean" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = feat_extract_norm
snake_case_ : Union[str, Any] = feat_extract_activation
snake_case_ : Any = list(_SCREAMING_SNAKE_CASE )
snake_case_ : Any = list(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = list(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = conv_bias
snake_case_ : Union[str, Any] = num_conv_pos_embeddings
snake_case_ : Tuple = num_conv_pos_embedding_groups
snake_case_ : Optional[int] = len(self.conv_dim )
snake_case_ : str = num_hidden_layers
snake_case_ : List[Any] = intermediate_size
snake_case_ : List[Any] = squeeze_factor
snake_case_ : Tuple = max_position_embeddings
snake_case_ : Optional[int] = position_buckets
snake_case_ : Union[str, Any] = share_att_key
snake_case_ : Optional[int] = relative_attention
snake_case_ : List[str] = norm_rel_ebd
snake_case_ : Tuple = list(_SCREAMING_SNAKE_CASE )
snake_case_ : str = hidden_act
snake_case_ : Any = num_attention_heads
snake_case_ : Dict = hidden_dropout
snake_case_ : int = attention_dropout
snake_case_ : Any = activation_dropout
snake_case_ : Optional[Any] = feat_proj_dropout
snake_case_ : Tuple = final_dropout
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Tuple = feature_layer_norm_eps
snake_case_ : Any = initializer_range
snake_case_ : Optional[int] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ : List[str] = apply_spec_augment
snake_case_ : List[Any] = mask_time_prob
snake_case_ : str = mask_time_length
snake_case_ : Optional[Any] = mask_time_min_masks
snake_case_ : Optional[Any] = mask_feature_prob
snake_case_ : str = mask_feature_length
snake_case_ : Optional[int] = mask_feature_min_masks
# ctc loss
snake_case_ : Any = ctc_loss_reduction
snake_case_ : Tuple = ctc_zero_infinity
# sequence classification
snake_case_ : int = use_weighted_layer_sum
snake_case_ : List[Any] = classifier_proj_size
@property
def _lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
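# The property above is the cumulative stride of the conv feature
# extractor; a standalone restatement using the module-level imports
# and the default `conv_stride` tuple:
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per output frame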
| 114 | 1 |
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : Any = current_set.copy()
for row_index, row in enumerate(A__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = row[0]
        for column_index, column in enumerate(row ):
if magnitude == 0:
SCREAMING_SNAKE_CASE_ : int = column
continue
SCREAMING_SNAKE_CASE_ : List[str] = column / magnitude
# Subtract to cancel term
SCREAMING_SNAKE_CASE_ : Any = current_set[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [first_row]
SCREAMING_SNAKE_CASE_ : Dict = current_set[1::]
for row in current_set:
SCREAMING_SNAKE_CASE_ : Tuple = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A__ )
continue
        for column_index in range(len(row ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
SCREAMING_SNAKE_CASE_ : Tuple = final_set[0]
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
SCREAMING_SNAKE_CASE_ : int = simplify(A__ )
        for i in range(len(current_first_column ) ):
            resultant[i].insert(0, current_first_column[i] )
        resultant.insert(0, first_row )
SCREAMING_SNAKE_CASE_ : Optional[int] = resultant
return final_set
def a__ ( A__ ):
if len(A__ ) == 0:
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
SCREAMING_SNAKE_CASE_ : List[str] = len(A__ ) + 1
    if any(len(item ) != _length for item in equations ):
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
for row in equations:
        if any(not isinstance(column, (int, float) ) for column in row ):
raise ValueError('solve_simultaneous() requires lists of integers' )
if len(A__ ) == 1:
return [equations[0][-1] / equations[0][0]]
SCREAMING_SNAKE_CASE_ : Any = equations.copy()
if any(0 in row for row in data_set ):
SCREAMING_SNAKE_CASE_ : List[Any] = data_set.copy()
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for row_index, row in enumerate(A__ ):
if 0 not in row:
                SCREAMING_SNAKE_CASE_ : Optional[int] = data_set.pop(row_index )
break
if not full_row:
raise ValueError('solve_simultaneous() requires at least 1 full equation' )
        data_set.insert(0, full_row )
SCREAMING_SNAKE_CASE_ : Any = data_set.copy()
    SCREAMING_SNAKE_CASE_ : List[str] = simplify(data_set )
SCREAMING_SNAKE_CASE_ : Optional[int] = simplified[::-1]
SCREAMING_SNAKE_CASE_ : list = []
for row in simplified:
SCREAMING_SNAKE_CASE_ : str = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
        SCREAMING_SNAKE_CASE_ : Optional[int] = row.copy()[: len(row ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A__ ) == 0:
solutions.append(0 )
continue
SCREAMING_SNAKE_CASE_ : Any = temp_row[1::]
SCREAMING_SNAKE_CASE_ : List[Any] = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
current_solution -= column * solutions[column_index]
solutions.append(A__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for item in solutions:
        final.append(float(round(item, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ : int =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
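    # Cross-check sketch using numpy (an assumed extra dependency, not
    # required by the solver above); the expected solution of the 5x5
    # system is [-1, 0, 1, 2, 3].
    import numpy as np

    system = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    a = np.array([row[:-1] for row in system], dtype=float)  # coefficient matrix
    b = np.array([row[-1] for row in system], dtype=float)  # constants column
    print(np.linalg.solve(a, b))  # [-1.  0.  1.  2.  3.]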
| 101 |
from ...processing_utils import ProcessorMixin
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = """WhisperFeatureExtractor"""
_UpperCAmelCase = """WhisperTokenizer"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.feature_extractor
SCREAMING_SNAKE_CASE_ : List[Any] = False
def UpperCamelCase__ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=lowerCAmelCase__ , language=lowerCAmelCase__ , no_timestamps=lowerCAmelCase__ )
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop('audio' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = kwargs.pop('sampling_rate' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('text' , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
SCREAMING_SNAKE_CASE_ : List[Any] = args[0]
SCREAMING_SNAKE_CASE_ : List[Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(lowerCAmelCase__ , *lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = encodings['input_ids']
return inputs
def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__="np" ):
"""simple docstring"""
return self.tokenizer.get_prompt_ids(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
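# Hedged usage sketch via the upstream transformers API; the checkpoint
# name and the 1-second silent clip are illustrative assumptions.
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
inputs = processor(audio=[0.0] * 16000, sampling_rate=16000, return_tensors="pt")
print(inputs.input_features.shape)  # (1, 80, 3000): log-mel features padded to 30 s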
| 101 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class snake_case_( unittest.TestCase ):
__UpperCamelCase = inspect.getfile(accelerate.test_utils )
__UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__UpperCamelCase = ['''accelerate''', '''launch''']
__UpperCamelCase = Path.home() / '''.cache/huggingface/accelerate'''
__UpperCamelCase = '''default_config.yaml'''
__UpperCamelCase = config_folder / config_file
__UpperCamelCase = config_folder / '''_default_config.yaml'''
__UpperCamelCase = Path('''tests/test_configs''' )
@classmethod
def lowerCamelCase__ ( cls : Dict ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase__ ( self : int ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=UpperCamelCase_ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(UpperCamelCase_ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase__ ( self : List[str] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class snake_case_( unittest.TestCase ):
__UpperCamelCase = '''test-tpu'''
__UpperCamelCase = '''us-central1-a'''
__UpperCamelCase = '''ls'''
__UpperCamelCase = ['''accelerate''', '''tpu-config''']
__UpperCamelCase = '''cd /usr/share'''
__UpperCamelCase = '''tests/test_samples/test_command_file.sh'''
__UpperCamelCase = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Dict = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=UpperCamelCase_ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : int = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=UpperCamelCase_ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase_ , )
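# A minimal sketch of the subprocess pattern these tests exercise, using
# only the standard library; the training script name is an assumption.
import subprocess

completed = subprocess.run(
    ["accelerate", "launch", "--num_processes", "1", "train.py"],
    capture_output=True,
    text=True,
)
print(completed.returncode)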
| 707 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class snake_case_( a__ ):
__UpperCamelCase = 42
__UpperCamelCase = None
def _snake_case ( _snake_case : Dict , _snake_case : List[str]=0.999 , _snake_case : Dict="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(_snake_case : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_snake_case : Optional[int] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCAmelCase : List[Any] = []
for i in range(_snake_case ):
lowerCAmelCase : int = i / num_diffusion_timesteps
lowerCAmelCase : Tuple = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) )
return torch.tensor(_snake_case , dtype=torch.floataa )
class snake_case_( a__ , a__ ):
@register_to_config
def __init__( self : Any , UpperCamelCase_ : int = 1_0_0_0 , UpperCamelCase_ : str = "fixed_small_log" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[float] = 1.0 , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
lowerCAmelCase : Any = betas_for_alpha_bar(UpperCamelCase_ )
lowerCAmelCase : str = 1.0 - self.betas
lowerCAmelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 )
lowerCAmelCase : Tuple = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
lowerCAmelCase : Any = 1.0
# setable values
lowerCAmelCase : Any = None
lowerCAmelCase : Any = torch.from_numpy(np.arange(0 , UpperCamelCase_ )[::-1].copy() )
lowerCAmelCase : List[str] = variance_type
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ):
return sample
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, torch.device] = None ):
lowerCAmelCase : Any = num_inference_steps
lowerCAmelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
lowerCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
lowerCAmelCase : Any = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ):
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : int = self.alphas_cumprod[t]
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : Tuple = self.betas[t]
else:
lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) )
lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowerCAmelCase : Optional[Any] = variance.log()
lowerCAmelCase : Union[str, Any] = beta.log()
lowerCAmelCase : Dict = (predicted_variance + 1) / 2
lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ):
lowerCAmelCase : Optional[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 )
else:
lowerCAmelCase : Optional[int] = None
# 1. compute alphas, betas
if prev_timestep is None:
lowerCAmelCase : Any = t - 1
lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase : int = 1 - alpha_prod_t
lowerCAmelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase : List[Any] = self.betas[t]
lowerCAmelCase : Optional[int] = self.alphas[t]
else:
lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
lowerCAmelCase : Dict = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : Tuple = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Dict = torch.clamp(
UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase : int = 0
if t > 0:
lowerCAmelCase : Union[str, Any] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device )
lowerCAmelCase : Any = self._get_variance(
UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , )
if self.variance_type == "fixed_small_log":
lowerCAmelCase : str = variance
elif self.variance_type == "learned_range":
lowerCAmelCase : Optional[Any] = (0.5 * variance).exp()
else:
raise ValueError(
F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
''' for the UnCLIPScheduler.''' )
lowerCAmelCase : List[Any] = variance * variance_noise
lowerCAmelCase : int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowerCAmelCase : int = timesteps.to(original_samples.device )
lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase : str = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
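# Self-contained sketch of the "squaredcos_cap_v2" (cosine) beta schedule
# built by the module-level helper above, restated with descriptive names
# and using the imports already at the top of the file:
def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)

print(cosine_betas(1000)[:5])  # betas grow monotonically toward max_beta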
| 637 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__UpperCAmelCase = random.Random()
def _snake_case ( A , A=1.0 , A=None , A=None ) -> Optional[Any]:
if rng is None:
lowerCAmelCase__ = global_rng
lowerCAmelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=7 , lowerCamelCase_=4_00 , lowerCamelCase_=20_00 , lowerCamelCase_=1 , lowerCamelCase_=0.0 , lowerCamelCase_=1_60_00 , lowerCamelCase_=True , lowerCamelCase_=80 , lowerCamelCase_=16 , lowerCamelCase_=64 , lowerCamelCase_="hann_window" , lowerCamelCase_=80 , lowerCamelCase_=76_00 , lowerCamelCase_=1e-10 , lowerCamelCase_=True , ) -> List[Any]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = min_seq_length
lowerCAmelCase__ = max_seq_length
lowerCAmelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase__ = feature_size
lowerCAmelCase__ = padding_value
lowerCAmelCase__ = sampling_rate
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = num_mel_bins
lowerCAmelCase__ = hop_length
lowerCAmelCase__ = win_length
lowerCAmelCase__ = win_function
lowerCAmelCase__ = fmin
lowerCAmelCase__ = fmax
lowerCAmelCase__ = mel_floor
lowerCAmelCase__ = return_attention_mask
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False , lowerCamelCase_=False ) -> str:
def _flatten(lowerCamelCase_ ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
lowerCAmelCase__ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase__ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False , lowerCamelCase_=False ) -> Union[str, Any]:
if equal_length:
lowerCAmelCase__ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase__ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : str = SpeechTaFeatureExtractor
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = SpeechTaFeatureExtractionTester(self )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
self.assertTrue(np.all(np.mean(lowerCamelCase_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ , axis=0 ) - 1 ) < 1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase__ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
lowerCAmelCase__ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test batched
lowerCAmelCase__ = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values
lowerCAmelCase__ = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase__ = [None, 16_00, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors='''np''' )
lowerCAmelCase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = range(8_00 , 14_00 , 2_00 )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase__ = [None, 16_00, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = feat_extract(lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ )
lowerCAmelCase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feat_extract(
lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10_00 , padding='''max_length''' , return_tensors='''np''' )
lowerCAmelCase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feat_extract(
lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10_00 , padding='''longest''' , return_tensors='''np''' )
lowerCAmelCase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feat_extract(
lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=20_00 , padding='''longest''' , return_tensors='''np''' )
lowerCAmelCase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = np.random.rand(1_00 ).astype(np.floataa )
lowerCAmelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase__ = feature_extractor(audio_target=lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCAmelCase__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
lowerCAmelCase__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test batched
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_values
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCAmelCase__ = np.asarray(lowerCamelCase_ )
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_values
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase_ ) == len(lowerCamelCase_ ) for x, y in zip(lowerCamelCase_ , processed_features[input_name] ) ) )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase_ )
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
lowerCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase_ )
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
lowerCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ = feat_extract.num_mel_bins # hack!
lowerCAmelCase__ = feat_extract.pad(lowerCamelCase_ , padding='''longest''' , return_tensors='''np''' )[input_name]
lowerCAmelCase__ = feat_extract.pad(lowerCamelCase_ , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = self.feat_extract_dict
lowerCAmelCase__ = True
lowerCAmelCase__ = self.feature_extraction_class(**lowerCamelCase_ )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase__ = [len(lowerCamelCase_ ) for x in speech_inputs]
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ = feat_extract.num_mel_bins # hack!
lowerCAmelCase__ = feat_extract.pad(lowerCamelCase_ , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , lowerCamelCase_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.feat_extract_dict
lowerCAmelCase__ = True
lowerCAmelCase__ = self.feature_extraction_class(**lowerCamelCase_ )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase__ = [len(lowerCamelCase_ ) for x in speech_inputs]
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ = min(lowerCamelCase_ )
lowerCAmelCase__ = feat_extract.num_mel_bins # hack!
lowerCAmelCase__ = feat_extract.pad(
lowerCamelCase_ , padding='''max_length''' , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''np''' )
self.assertIn('''attention_mask''' , lowerCamelCase_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict:
from datasets import load_dataset
lowerCAmelCase__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
lowerCAmelCase__ = ds.sort('''id''' ).select(range(lowerCamelCase_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# fmt: off
lowerCAmelCase__ = torch.tensor(
[2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03,
3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03,
2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04,
4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03,
7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04,
4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] )
# fmt: on
lowerCAmelCase__ = self._load_datasamples(1 )
lowerCAmelCase__ = SpeechTaFeatureExtractor()
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 9_36_80) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCamelCase_ , atol=1e-6 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# fmt: off
lowerCAmelCase__ = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
lowerCAmelCase__ = self._load_datasamples(1 )
lowerCAmelCase__ = SpeechTaFeatureExtractor()
lowerCAmelCase__ = feature_extractor(audio_target=lowerCamelCase_ , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 3_66, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase_ , atol=1e-4 ) )
| 90 |
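# Standalone sketch of the zero-mean / unit-variance property asserted by
# `_check_zero_mean_unit_variance` in the feature-extractor tests above:
import numpy as np

x = np.random.randn(4096).astype(np.float32)
x = (x - x.mean()) / np.sqrt(x.var() + 1e-7)  # the normalization under test
assert abs(x.mean()) < 1e-3
assert abs(x.var() - 1) < 1e-3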
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: int = """table-transformer"""
SCREAMING_SNAKE_CASE_: int = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_: int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __a=True , __a=None , __a=3 , __a=100 , __a=6 , __a=2048 , __a=8 , __a=6 , __a=2048 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=256 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
A__ = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(__a , __a ):
A__ = backbone_config.get('model_type' )
A__ = CONFIG_MAPPING[backbone_model_type]
A__ = config_class.from_dict(__a )
# set timm attributes to None
A__ , A__ , A__ = None, None, None
A__ = use_timm_backbone
A__ = backbone_config
A__ = num_channels
A__ = num_queries
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = init_xavier_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = encoder_layers
A__ = auxiliary_loss
A__ = position_embedding_type
A__ = backbone
A__ = use_pretrained_backbone
A__ = dilation
# Hungarian matcher
A__ = class_cost
A__ = bbox_cost
A__ = giou_cost
# Loss coefficients
A__ = mask_loss_coefficient
A__ = dice_loss_coefficient
A__ = bbox_loss_coefficient
A__ = giou_loss_coefficient
A__ = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a )
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return self.d_model
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: Tuple = version.parse("""1.11""" )
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return 1E-5
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return 12
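# Hedged usage sketch via the upstream TableTransformerConfig API; the
# `attribute_map` above routes hidden_size -> d_model and
# num_attention_heads -> encoder_attention_heads.
from transformers import TableTransformerConfig

cfg = TableTransformerConfig()
print(cfg.hidden_size, cfg.num_attention_heads)  # 256 8 with the defaults above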
| 260 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_snake_case = Features({"""image""": Image()} )
_snake_case = Features({"""labels""": ClassLabel} )
_snake_case = "image"
_snake_case = "labels"
def UpperCAmelCase ( self , A ) -> Optional[int]:
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , A ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
snake_case : Dict = copy.deepcopy(self )
snake_case : Optional[Any] = self.label_schema.copy()
snake_case : Union[str, Any] = features[self.label_column]
snake_case : List[str] = label_schema
return task_template
@property
def UpperCAmelCase ( self ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
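# Hedged sketch of features this template can align with, reusing the
# module-level imports above; the label names are illustrative assumptions.
features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# features["labels"].num_classes == 2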
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
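# Hedged sketch: the spectrogram patch count implied by the defaults
# above, following the usual AST embedding arithmetic
# ((dim - patch_size) // stride + 1 per axis):
num_mel_bins, max_length, patch_size = 128, 1024, 16
frequency_stride, time_stride = 10, 10
frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 12
time_out = (max_length - patch_size) // time_stride + 1  # 101
print(frequency_out * time_out)  # 1212 patches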
| 684 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 8 ) -> str:
_A = ascii_letters + digits + punctuation
return "".join(secrets.choice(_snake_case ) for _ in range(_snake_case ) )
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int ) -> str:
    # Build the password from the random_number, random_letters, and
    # random_characters helper functions
# Put your code here...
i -= len(_snake_case )
_A = i // 3
_A = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_A = (
chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
)
_A = list(_snake_case )
shuffle(_snake_case )
return "".join(_snake_case )
# random is a generalised function for letters, characters and numbers
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int ) -> str:
return "".join(secrets.choice(_snake_case ) for _ in range(_snake_case ) )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] ) -> str:
pass # Put your code here...
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :Union[str, Any] ) -> str:
pass # Put your code here...
def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :Dict ) -> str:
pass # Put your code here...
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int = 8 ) -> bool:
if len(_snake_case ) < min_length:
# Your Password must be at least 8 characters long
return False
_A = any(char in ascii_uppercase for char in password )
_A = any(char in ascii_lowercase for char in password )
_A = any(char in digits for char in password )
_A = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
_A = int(input('''Please indicate the max length of your password: ''' ).strip() )
_A = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''' , password_generator(_snake_case ) )
print(
'''Alternative Password generated:''' , alternative_password_generator(_snake_case , _snake_case ) , )
    print('''[If you are thinking of using this password, you had better save it.]''' )
if __name__ == "__main__":
main()
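    # Standalone sketch of the core idea above: draw characters with the
    # `secrets` module, then validate length and character classes, using
    # the names already imported at the top of the file.
    pool = ascii_letters + digits + punctuation
    candidate = "".join(secrets.choice(pool) for _ in range(12))
    strong = (
        len(candidate) >= 8
        and any(c in ascii_uppercase for c in candidate)
        and any(c in ascii_lowercase for c in candidate)
        and any(c in digits for c in candidate)
        and any(c in punctuation for c in candidate)
    )
    print(candidate, strong)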
| 2 |
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE (torch.nn.Module ):
def __init__( self : int , a : Optional[Any]="sayef/fsner-bert-base-uncased" )-> str:
"""simple docstring"""
super(a , self ).__init__()
lowercase__ = AutoModel.from_pretrained(a , return_dict=a )
lowercase__ = torch.nn.CosineSimilarity(3 , 1E-0_8 )
lowercase__ = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , **a : Tuple )-> List[str]:
"""simple docstring"""
return self.bert(**a ).last_hidden_state
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Any )-> Optional[Any]:
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=a )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : List[str] , a : List[str] , a : List[str]=1 )-> Dict:
"""simple docstring"""
return self.softmax(T * self.cos(a , a ) )
def SCREAMING_SNAKE_CASE_ ( self : str , a : Optional[Any] , a : List[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = W_supports['sizes'].tolist()
lowercase__ = W_supports['start_token_id'].item()
lowercase__ = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase__ = self.BERT(**a )
lowercase__ = self.BERT(**a )
lowercase__ = None
lowercase__ = None
lowercase__ = W_supports['input_ids'] == start_token_id
lowercase__ = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes ):
if i == 0:
lowercase__ = 0
else:
lowercase__ = support_sizes[i - 1]
lowercase__ = S[s : s + size][start_token_masks[s : s + size]]
lowercase__ = S[s : s + size][end_token_masks[s : s + size]]
lowercase__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase__ = torch.vstack((p_starts, p_start) )
lowercase__ = torch.vstack((p_ends, p_end) )
else:
lowercase__ = p_start
lowercase__ = p_end
return p_starts, p_ends
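# Standalone sketch of the temperature-scaled cosine scoring used above;
# the tensor shapes and the temperature value are illustrative.
cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-8)
softmax = torch.nn.Softmax(dim=1)
query = torch.randn(2, 6, 16)  # (batch, tokens, hidden)
support = torch.randn(2, 6, 16)
print(softmax(1.0 * cos(query, support)).shape)  # torch.Size([2, 6])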
| 235 | 0 |
from __future__ import annotations
_A : int = 8.988e9 # units = N * m^2 * C^-2
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> dict[str, float]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
lowerCamelCase__ : Any = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
lowerCamelCase__ : List[Any] = abs(UpperCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
lowerCamelCase__ : List[str] = abs(UpperCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
lowerCamelCase__ : List[str] = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
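    # Worked instance of the formula above, F = k * |q1 * q2| / d**2,
    # for two 1 C charges 1 m apart (constant restated for clarity):
    k = 8.988e9  # N * m^2 * C^-2
    print(f"{k * abs(1.0 * 1.0) / (1.0 ** 2):.3e} N")  # 8.988e+09 N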
| 130 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self : Any , A : str ) ->int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
lowerCamelCase__ : Any = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(A )
def __lowerCamelCase ( self : List[str] ) ->List[str]:
lowerCamelCase__ : Optional[Any] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
lowerCamelCase__ : Tuple = TensorFlowBenchmark(A )
lowerCamelCase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Dict ) ->Optional[Any]:
lowerCamelCase__ : Tuple = '''sgugger/tiny-distilbert-classification'''
lowerCamelCase__ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
lowerCamelCase__ : str = TensorFlowBenchmark(A )
lowerCamelCase__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Tuple ) ->Dict:
lowerCamelCase__ : int = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : str = TensorFlowBenchmark(A )
lowerCamelCase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Union[str, Any] ) ->Tuple:
lowerCamelCase__ : Optional[int] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(A )
lowerCamelCase__ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
lowerCamelCase__ : List[Any] = TensorFlowBenchmark(A , [config] )
lowerCamelCase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : List[Any] ) ->Any:
lowerCamelCase__ : Optional[int] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(A )
lowerCamelCase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : Optional[Any] = TensorFlowBenchmark(A , [config] )
lowerCamelCase__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Any ) ->Optional[Any]:
lowerCamelCase__ : List[str] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : Union[str, Any] = TensorFlowBenchmark(A )
lowerCamelCase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self : Union[str, Any] ) ->Any:
lowerCamelCase__ : Tuple = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Optional[int] = AutoConfig.from_pretrained(A )
lowerCamelCase__ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : str = TensorFlowBenchmark(A , [config] )
lowerCamelCase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self : Any ) ->Any:
lowerCamelCase__ : Dict = '''patrickvonplaten/t5-tiny-random'''
lowerCamelCase__ : int = AutoConfig.from_pretrained(A )
lowerCamelCase__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : Dict = TensorFlowBenchmark(A , configs=[config] )
lowerCamelCase__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def __lowerCamelCase ( self : Dict ) ->Dict:
lowerCamelCase__ : Dict = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=A , multi_process=A , )
lowerCamelCase__ : List[Any] = TensorFlowBenchmark(A )
lowerCamelCase__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Any ) ->Optional[Any]:
lowerCamelCase__ : List[str] = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(A , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(A , '''env.csv''' ) , multi_process=A , )
lowerCamelCase__ : Tuple = TensorFlowBenchmark(A )
benchmark.run()
self.assertTrue(Path(os.path.join(A , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(A , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(A , '''env.csv''' ) ).exists() )
    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 130 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 383 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1_024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
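    # Editorial note: `num_hidden_layers` is intentionally set to the same value as
    # `decoder_layers` above -- this is a decoder-only configuration, so the generic
    # attribute and the decoder-specific one must agree.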
| 383 | 1 |
"""simple docstring"""
def UpperCamelCase ( _A ) -> Optional[Any]:
if n == 1 or not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return 0
elif n == 2:
return 1
else:
lowercase : Optional[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase ( _A ) -> List[Any]:
lowercase : List[Any] = 0
lowercase : Union[str, Any] = 2
while digits < n:
index += 1
lowercase : Optional[Any] = len(str(fibonacci(_lowerCAmelCase ) ) )
return index
def UpperCamelCase ( _A = 1_000 ) -> Tuple:
return fibonacci_digits_index(_lowerCAmelCase )
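# Illustrative sanity check (not part of the original solution): the first
# Fibonacci number with three digits is 144, which this indexing reaches at
# index 12, so fibonacci_digits_index(3) == 12. With the default n=1000 the
# function answers Project Euler problem 25.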
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 701 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
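    # Editorial note: PNDM runs Runge-Kutta (PRK) warm-up steps before switching to
    # the linear multistep (PLMS) steps, which is why full_loop iterates over
    # scheduler.prk_timesteps and scheduler.plms_timesteps separately.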
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 348 | 0 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
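# Illustrative check (not part of the original file): gnome sort is an in-place
# O(n^2) comparison sort, e.g. gnome_sort([5, 3, 4, 1]) == [1, 3, 4, 5].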
if __name__ == "__main__":
a_ = input('Enter numbers separated by a comma:\n').strip()
a_ = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted)) | 25 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pickle_tokenizer(self):
        pass  # TODO add if relevant

    def test_rust_and_python_full_tokenizers(self):
        pass  # TODO add if relevant

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic")
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国""", """人""", """参政""", """権"""])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人""", """参政権"""])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人参政権"""])
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""") , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こんにちは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは""") , ["""こん""", """##ばんは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""") , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""])
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pickle_tokenizer(self):
        pass  # TODO add if relevant

    def test_rust_and_python_full_tokenizers(self):
        pass  # TODO add if relevant

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こ""", """ん""", """に""", """ち""", """は"""])
self.assertListEqual(tokenizer.tokenize("""こんにちほ""") , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
__SCREAMING_SNAKE_CASE = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase__)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
| 155 | 0 |
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
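# Illustrative example (values assumed, not from the original module): with
# pad_token_id=0 and decoder_start_token_id=0,
#     shift_tokens_right(jnp.array([[5, 6, -100]]), 0, 0)
# returns [[0, 5, 6]]: labels are shifted right and any -100 becomes the pad id.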
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1_024,
        d_embed=1_024,
        n_head=16,
        d_head=64,
        d_inner=4_096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1_600,
        clamp_len=1_000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
@property
    def max_position_embeddings(self):
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings(self, value):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 279 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """A Monte Carlo estimate of pi from uniform samples in the unit square."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("******************" )
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
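# Editorial note: all three estimators converge at the usual Monte Carlo rate of
# O(1/sqrt(iterations)); pi_estimator(10_000) is typically accurate to about two
# decimal places.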
| 693 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int) -> np.ndarray:
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
# each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
return gabor
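# Illustrative check (not in the original script): an even ksize is bumped to the
# next odd value, so gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11).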
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 693 | 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
_lowerCamelCase : List[str] = _modexpt(_lowerCamelCase , exponent // 2 , _lowerCamelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(_lowerCamelCase , exponent - 1 , _lowerCamelCase )) % modulo_value
def lowerCamelCase_( _lowerCamelCase = 1777 , _lowerCamelCase = 1855 , _lowerCamelCase = 8 ) -> int:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = base
for _ in range(1 , _lowerCamelCase ):
_lowerCamelCase : Optional[int] = _modexpt(_lowerCamelCase , _lowerCamelCase , 10**digits )
return result
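# Sanity check (illustrative): for exponent >= 1, _modexpt agrees with the
# built-in three-argument pow, e.g. _modexpt(3, 20, 10**8) == pow(3, 20, 10**8).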
if __name__ == "__main__":
    print(f"{solution() = }")
| 386 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
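    # Editorial note: the 384x384 default size and the OPENAI_CLIP_MEAN/STD
    # constants above are the CLIP-style preprocessing defaults this image
    # processor inherits.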
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: Optional[bool] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
| 386 | 1 |
import argparse
from collections import defaultdict
def _lowercase( __a : Union[str, Any] , __a : Dict , __a : Union[str, Any] , __a : Optional[int] , __a : Optional[int] ):
a__ =f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__a , 'r' ) as f:
a__ =f.readlines()
a__ =f"""class {class_name}("""
a__ =f"""{4 * ' '}def {test_name}("""
a__ =f"""{8 * ' '}{correct_line.split()[0]}"""
a__ =f"""{16 * ' '}{correct_line.split()[0]}"""
a__ =False
a__ =False
a__ =False
a__ =False
a__ =0
a__ =0
a__ =[]
for line in lines:
if line.startswith(__a ):
a__ =True
elif in_class and line.startswith(__a ):
a__ =True
elif in_class and in_func and (line.startswith(__a ) or line.startswith(__a )):
a__ =len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
a__ =True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
a__ =True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * ' '}{correct_line}""" )
a__ =a__ =a__ =a__ =False
else:
new_lines.append(__a )
with open(__a , 'w' ) as f:
for line in new_lines:
f.write(__a )
def _lowercase( __a : int , __a : Union[str, Any]=None ):
if fail is not None:
with open(__a , 'r' ) as f:
a__ ={l.strip() for l in f.readlines()}
else:
a__ =None
with open(__a , 'r' ) as f:
a__ =f.readlines()
a__ =defaultdict(__a )
for line in correct_lines:
a__ , a__ , a__ , a__ =line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__a , __a , __a , __a , __a )
if __name__ == "__main__":
_lowerCAmelCase: Tuple = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
_lowerCAmelCase: int = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 20 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
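# Editorial note: this is the classic "all construct" tabulation: table[i] holds
# every way to build target[:i], so table[len(target)] enumerates all
# decompositions of the full string.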
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 128 | 0 |
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : int = 10_00 ) -> int:
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE = candidate
return product
if __name__ == "__main__":
print(f'''{solution() = }''')
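
# Cross-check (my addition): brute force over all (a, b) pairs for a small perimeter.
def _brute_force(n: int) -> int:
    best = -1
    for a in range(1, n):
        for b in range(a, n):
            c = n - a - b
            if c > 0 and a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best


assert _brute_force(12) == 60  # the 3-4-5 triangle: 3 + 4 + 5 = 12, product 60
assert solution(12) == _brute_force(12)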
| 327 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit_with_args(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
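
# Behavior sketch (my addition): the decorator retries `f`, halving the batch size
# after every "CUDA out of memory" RuntimeError until the call succeeds or reaches 0.
@find_executable_batch_size(starting_batch_size=32)
def _demo(batch_size):
    if batch_size > 8:  # pretend anything above 8 runs out of memory
        raise_fake_out_of_memory()
    return batch_size


assert _demo() == 8  # tried 32 -> 16 -> 8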
| 327 | 1 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """
    Determine whether the given string is a valid Sri Lankan mobile phone number.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
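    # Extra quick checks (my addition): formats accepted / rejected by the pattern.
    assert is_sri_lankan_phone_number("+94773283048")
    assert is_sri_lankan_phone_number("0094712345678")
    assert not is_sri_lankan_phone_number("0094652345678")  # 65 is not a mobile prefix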
| 398 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
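
# Quick usage sketch (my addition, assuming the standard `transformers` config API):
# the three default adaptive-softmax cutoffs drive the `tie_projs` pattern above.
config = TransfoXLConfig(d_model=512, n_layer=6)
assert config.tie_projs == [False, True, True, True]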
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
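
# Reference sketch (my addition) of the `label_smoothed_nll_loss` helper imported
# dynamically in __init__ above -- fairseq-style smoothing over log-probabilities.
# The clamp is my own safety measure so gather never sees an out-of-range index.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    pad_mask = target.eq(ignore_index)
    safe_target = target.clamp(min=0)  # masked positions are zeroed out below
    nll_loss = -lprobs.gather(dim=-1, index=safe_target).masked_fill(pad_mask, 0.0)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True).masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss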
| 231 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 231 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
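    # Note (illustrative, comment only): with the lazy module installed in
    # sys.modules above, `from <this package> import ConditionalDetrConfig` defers
    # loading the heavy torch/vision submodules until first attribute access.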
| 21 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 365 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 717 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> f1_metric = datasets.load_metric("f1")
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,)
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 322 | 0 |
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """
    Calculate the net present value of a series of yearly cash flows
    at the given discount rate, rounded to two decimal places.
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
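    # Worked example (my addition): 10% rate, outlay of 1000, three inflows of 500.
    # NPV = -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 ~= 243.43
    assert present_value(0.10, [-1000, 500, 500, 500]) == 243.43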
| 382 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
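    # Illustration (my addition): how the prefix pairs above rewrite a checkpoint key.
    demo_key = "bert.bert.encoder.layer.0.attention.self.query.weight"
    for old, new in rename_keys_prefix:
        demo_key = demo_key.replace(old, new)
    assert demo_key == "visual_bert.encoder.layer.0.attention.self.query.weight"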
| 382 | 1 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 332 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 332 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 282 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=3 , __A=224 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , ):
__a = size if size is not None else {"""height""": 18, """width""": 18}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
def snake_case_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __UpperCAmelCase ( __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def snake_case_ ( self ):
__a = EfficientFormerImageProcessorTester(self )
@property
def snake_case_ ( self ):
return self.image_proc_tester.prepare_image_processor_dict()
def snake_case_ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_proc_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__a = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
__a = image_processor(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def snake_case_ ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_proc_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__a = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
__a = image_processor(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def snake_case_ ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_proc_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__a = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
__a = image_processor(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
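# Added usage sketch (illustration only): outside the test harness, the same
# shape contract holds for a released checkpoint — a single PIL image becomes
# a (1, channels, height, width) tensor:
#
#   import numpy as np
#   from PIL import Image
#   from transformers import ViTImageProcessor
#
#   processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
#   pixel_values = processor(image, return_tensors="pt").pixel_values
#   print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])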
| 99 | 0 |
import os
import sys
import unittest
A_ :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
A_ :List[str] = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
A_ :List[str] = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =get_test_to_tester_mapping(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =get_test_to_tester_mapping(lowerCamelCase__ )
__UpperCamelCase : Optional[int] ={'BertModelTest': 'BertModelTester'}
__UpperCamelCase : str ={
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any =get_model_to_test_mapping(lowerCamelCase__ )
__UpperCamelCase : List[Any] =get_model_to_test_mapping(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] ={
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
__UpperCamelCase : int ={
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =get_model_to_tester_mapping(lowerCamelCase__ )
__UpperCamelCase : int =get_model_to_tester_mapping(lowerCamelCase__ )
__UpperCamelCase : Optional[int] ={
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
__UpperCamelCase : str ={
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
| 154 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
A_ :int = logging.get_logger(__name__)
class __A :
"""simple docstring"""
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=None , lowerCamelCase__=None ):
"""simple docstring"""
if not conversation_id:
__UpperCamelCase : int =uuid.uuida()
if past_user_inputs is None:
__UpperCamelCase : int =[]
if generated_responses is None:
__UpperCamelCase : Union[str, Any] =[]
__UpperCamelCase : uuid.UUID =conversation_id
__UpperCamelCase : List[str] =past_user_inputs
__UpperCamelCase : List[str] =generated_responses
__UpperCamelCase : Optional[str] =text
def __eq__( self , lowerCamelCase__ ):
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
__UpperCamelCase : Any =text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__UpperCamelCase : Optional[int] =text
def __lowercase ( self ):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__UpperCamelCase : List[Any] =None
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
self.generated_responses.append(lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
"""simple docstring"""
__UpperCamelCase : Any =f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__UpperCamelCase : Tuple ='user' if is_user else 'bot'
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
a , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class __A ( a ):
"""simple docstring"""
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.tokenizer.pad_token_id is None:
__UpperCamelCase : int =self.tokenizer.eos_token
def __lowercase ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : int ={}
__UpperCamelCase : Tuple ={}
__UpperCamelCase : Union[str, Any] ={}
if min_length_for_response is not None:
__UpperCamelCase : Union[str, Any] =min_length_for_response
if minimum_tokens is not None:
__UpperCamelCase : Tuple =minimum_tokens
if "max_length" in generate_kwargs:
__UpperCamelCase : Optional[Any] =generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__UpperCamelCase : Any =clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCamelCase__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , lowerCamelCase__ , lowerCamelCase__=0 , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Tuple =super().__call__(lowerCamelCase__ , num_workers=lowerCamelCase__ , **lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) == 1:
return outputs[0]
return outputs
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=32 ):
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
__UpperCamelCase : List[str] =self.tokenizer._build_conversation_input_ids(lowerCamelCase__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__UpperCamelCase : List[Any] =self._legacy_parse_and_tokenize(lowerCamelCase__ )
if self.framework == "pt":
__UpperCamelCase : Any =torch.LongTensor([input_ids] )
elif self.framework == "tf":
__UpperCamelCase : Any =tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=10 , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : int =generate_kwargs.get('max_length' , self.model.config.max_length )
__UpperCamelCase : Dict =model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__UpperCamelCase : str =max_length - minimum_tokens
__UpperCamelCase : Optional[Any] =model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
__UpperCamelCase : Any =model_inputs['attention_mask'][:, -trim:]
__UpperCamelCase : List[str] =model_inputs.pop('conversation' )
__UpperCamelCase : int =max_length
__UpperCamelCase : Optional[int] =self.model.generate(**lowerCamelCase__ , **lowerCamelCase__ )
if self.model.config.is_encoder_decoder:
__UpperCamelCase : Tuple =1
else:
__UpperCamelCase : List[Any] =n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=True ):
"""simple docstring"""
__UpperCamelCase : Any =model_outputs['output_ids']
__UpperCamelCase : Dict =self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ , )
__UpperCamelCase : str =model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(lowerCamelCase__ )
return conversation
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.tokenizer.eos_token_id
__UpperCamelCase : Any =[]
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) )
if len(lowerCamelCase__ ) > self.tokenizer.model_max_length:
__UpperCamelCase : Tuple =input_ids[-self.tokenizer.model_max_length :]
return input_ids
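# Added usage sketch: this class backs the (since-deprecated) "conversational"
# pipeline task in transformers; `Conversation` and `pipeline` are the public
# entry points, and the model name is one commonly used chat checkpoint:
#
#   from transformers import Conversation, pipeline
#
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("Hi, what can you do?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])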
| 154 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
SCREAMING_SNAKE_CASE_ =['''onnx''']
def __init__( self : Dict , *snake_case__ : str , **snake_case__ : Any ):
'''simple docstring'''
requires_backends(self , ["onnx"] )
@classmethod
def __a ( cls : int , *snake_case__ : List[str] , **snake_case__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ["onnx"] )
@classmethod
def __a ( cls : Dict , *snake_case__ : str , **snake_case__ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["onnx"] )
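# Added note: this dummy mirrors the surface of the real class so that simply
# importing the package succeeds without `onnx` installed; instantiating the
# class (or calling its classmethods) is what raises the informative backend
# error via `requires_backends`.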
| 438 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_lowerCAmelCase : Dict = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_lowerCAmelCase : str = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_lowerCAmelCase : Dict = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_lowerCAmelCase : int = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
_lowerCAmelCase : List[Any] = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
_lowerCAmelCase : List[Any] = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str:
'''simple docstring'''
if isinstance(snake_case , snake_case ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Any , snake_case : str=False )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Dict = checkpoint[f'{old_prefix}.in_layers.0.weight']
UpperCAmelCase__ : List[Any] = checkpoint[f'{old_prefix}.in_layers.0.bias']
UpperCAmelCase__ : Dict = checkpoint[f'{old_prefix}.in_layers.2.weight']
UpperCAmelCase__ : List[Any] = checkpoint[f'{old_prefix}.in_layers.2.bias']
UpperCAmelCase__ : Optional[int] = checkpoint[f'{old_prefix}.emb_layers.1.weight']
UpperCAmelCase__ : Dict = checkpoint[f'{old_prefix}.emb_layers.1.bias']
UpperCAmelCase__ : Any = checkpoint[f'{old_prefix}.out_layers.0.weight']
UpperCAmelCase__ : List[Any] = checkpoint[f'{old_prefix}.out_layers.0.bias']
UpperCAmelCase__ : int = checkpoint[f'{old_prefix}.out_layers.3.weight']
UpperCAmelCase__ : int = checkpoint[f'{old_prefix}.out_layers.3.bias']
if has_skip:
UpperCAmelCase__ : str = checkpoint[f'{old_prefix}.skip_connection.weight']
UpperCAmelCase__ : Any = checkpoint[f'{old_prefix}.skip_connection.bias']
return new_checkpoint
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : Any , snake_case : Tuple , snake_case : Dict=None )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )
UpperCAmelCase__ : Dict = checkpoint[f'{old_prefix}.norm.weight']
UpperCAmelCase__ : Tuple = checkpoint[f'{old_prefix}.norm.bias']
UpperCAmelCase__ : Tuple = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : int = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Optional[Any] = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Optional[int] = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Optional[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Union[str, Any] = (
checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase__ : List[str] = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Union[str, Any] )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Any = torch.load(snake_case , map_location="cpu" )
UpperCAmelCase__ : Any = {}
UpperCAmelCase__ : Any = checkpoint["time_embed.0.weight"]
UpperCAmelCase__ : Optional[int] = checkpoint["time_embed.0.bias"]
UpperCAmelCase__ : Any = checkpoint["time_embed.2.weight"]
UpperCAmelCase__ : Optional[Any] = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase__ : List[Any] = checkpoint["label_emb.weight"]
UpperCAmelCase__ : Dict = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase__ : List[Any] = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase__ : Any = unet_config["down_block_types"]
UpperCAmelCase__ : Tuple = unet_config["layers_per_block"]
UpperCAmelCase__ : str = unet_config["attention_head_dim"]
UpperCAmelCase__ : str = unet_config["block_out_channels"]
UpperCAmelCase__ : Optional[int] = 1
UpperCAmelCase__ : Tuple = channels_list[0]
for i, layer_type in enumerate(snake_case ):
UpperCAmelCase__ : List[str] = channels_list[i]
UpperCAmelCase__ : Optional[int] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(snake_case ):
UpperCAmelCase__ : int = f'down_blocks.{i}.resnets.{j}'
UpperCAmelCase__ : Union[str, Any] = f'input_blocks.{current_layer}.0'
UpperCAmelCase__ : List[str] = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase__ : Optional[Any] = convert_resnet(snake_case , snake_case , snake_case , snake_case , has_skip=snake_case )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(snake_case ):
UpperCAmelCase__ : List[str] = f'down_blocks.{i}.resnets.{j}'
UpperCAmelCase__ : Optional[Any] = f'input_blocks.{current_layer}.0'
UpperCAmelCase__ : Optional[int] = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase__ : Any = convert_resnet(snake_case , snake_case , snake_case , snake_case , has_skip=snake_case )
UpperCAmelCase__ : Dict = f'down_blocks.{i}.attentions.{j}'
UpperCAmelCase__ : List[Any] = f'input_blocks.{current_layer}.1'
UpperCAmelCase__ : Optional[Any] = convert_attention(
snake_case , snake_case , snake_case , snake_case , snake_case )
current_layer += 1
if i != len(snake_case ) - 1:
UpperCAmelCase__ : Dict = f'down_blocks.{i}.downsamplers.0'
UpperCAmelCase__ : Optional[int] = f'input_blocks.{current_layer}.0'
UpperCAmelCase__ : Tuple = convert_resnet(snake_case , snake_case , snake_case , snake_case )
current_layer += 1
UpperCAmelCase__ : Dict = current_channels
# hardcoded the mid-block for now
UpperCAmelCase__ : int = "mid_block.resnets.0"
UpperCAmelCase__ : Optional[int] = "middle_block.0"
UpperCAmelCase__ : Any = convert_resnet(snake_case , snake_case , snake_case , snake_case )
UpperCAmelCase__ : Optional[int] = "mid_block.attentions.0"
UpperCAmelCase__ : Tuple = "middle_block.1"
UpperCAmelCase__ : Any = convert_attention(snake_case , snake_case , snake_case , snake_case , snake_case )
UpperCAmelCase__ : Optional[int] = "mid_block.resnets.1"
UpperCAmelCase__ : Union[str, Any] = "middle_block.2"
UpperCAmelCase__ : Optional[int] = convert_resnet(snake_case , snake_case , snake_case , snake_case )
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : int = unet_config["up_block_types"]
for i, layer_type in enumerate(snake_case ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase__ : Optional[Any] = f'up_blocks.{i}.resnets.{j}'
UpperCAmelCase__ : List[str] = f'output_blocks.{current_layer}.0'
UpperCAmelCase__ : List[str] = convert_resnet(snake_case , snake_case , snake_case , snake_case , has_skip=snake_case )
current_layer += 1
if i != len(snake_case ) - 1:
UpperCAmelCase__ : Dict = f'up_blocks.{i}.upsamplers.0'
UpperCAmelCase__ : Optional[Any] = f'output_blocks.{current_layer-1}.1'
UpperCAmelCase__ : Optional[Any] = convert_resnet(snake_case , snake_case , snake_case , snake_case )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase__ : Optional[Any] = f'up_blocks.{i}.resnets.{j}'
UpperCAmelCase__ : Tuple = f'output_blocks.{current_layer}.0'
UpperCAmelCase__ : Union[str, Any] = convert_resnet(snake_case , snake_case , snake_case , snake_case , has_skip=snake_case )
UpperCAmelCase__ : Optional[int] = f'up_blocks.{i}.attentions.{j}'
UpperCAmelCase__ : List[Any] = f'output_blocks.{current_layer}.1'
UpperCAmelCase__ : Any = convert_attention(
snake_case , snake_case , snake_case , snake_case , snake_case )
current_layer += 1
if i != len(snake_case ) - 1:
UpperCAmelCase__ : Dict = f'up_blocks.{i}.upsamplers.0'
UpperCAmelCase__ : Any = f'output_blocks.{current_layer-1}.2'
UpperCAmelCase__ : int = convert_resnet(snake_case , snake_case , snake_case , snake_case )
UpperCAmelCase__ : Union[str, Any] = checkpoint["out.0.weight"]
UpperCAmelCase__ : Any = checkpoint["out.0.bias"]
UpperCAmelCase__ : int = checkpoint["out.2.weight"]
UpperCAmelCase__ : str = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
_lowerCAmelCase : int = parser.parse_args()
_lowerCAmelCase : Optional[int] = strabool(args.class_cond)
_lowerCAmelCase : Optional[Any] = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
_lowerCAmelCase : Dict = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase : Dict = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_lowerCAmelCase : str = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
_lowerCAmelCase : int = None
_lowerCAmelCase : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
_lowerCAmelCase : Optional[Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_lowerCAmelCase : Optional[int] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_lowerCAmelCase : str = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase : Any = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
_lowerCAmelCase : Dict = CMStochasticIterativeScheduler(**scheduler_config)
_lowerCAmelCase : Tuple = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
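# Added example invocation (illustrative; the script filename is assumed, and
# the script keys its U-Net and scheduler configs off substrings in the
# checkpoint basename, e.g. "imagenet64", "256"/"bedroom"/"cat", "cd", "ct"):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True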
| 438 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''MobileViTFeatureExtractor''']
__lowerCamelCase = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
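# Added note: with this `_LazyModule` pattern, importing the package is cheap —
# submodules named in `_import_structure` are only imported when an attribute
# such as `MobileViTModel` is first accessed, while the `TYPE_CHECKING` branch
# keeps static type checkers and IDE autocompletion working on the same names.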
| 667 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
with open(os.path.dirname(UpperCAmelCase__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
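# Added worked example (the three helpers above correspond, in order, to
# parse_roman_numerals, generate_roman_numerals and solution in the original
# Project Euler 89 solution; names are spelled out here because the obfuscated
# definitions shadow one another):
#
#   parse_roman_numerals("XVIIII")  -> 19       # 10 + 5 + 1 + 1 + 1 + 1
#   generate_roman_numerals(19)     -> "XIX"    # minimal form
#   saving for that line: len("XVIIII") - len("XIX") = 6 - 3 = 3 characters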
| 667 | 1 |
from scipy.stats import spearmanr
import datasets
UpperCamelCase = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
UpperCamelCase = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
UpperCamelCase = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __a ( self :List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def __a ( self :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=False ):
UpperCamelCase__ :Any = spearmanr(lowerCamelCase__ , lowerCamelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]} | 45 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
a = OpenAIGPTTokenizer
a = OpenAIGPTTokenizerFast
a = True
a = False
def A ( self : Any):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_A : List[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_A : int = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE))))
_A : Dict = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
_A : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
_A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w') as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE))
with open(self.merges_file , 'w') as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE))
def A ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any]):
return "lower newer", "lower newer"
def A ( self : Optional[int]):
_A : Tuple = OpenAIGPTTokenizer(self.vocab_file , self.merges_file)
_A : int = 'lower'
_A : Any = ['low', 'er</w>']
_A : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : Optional[Any] = tokens + ['<unk>']
_A : Optional[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE)
def A ( self : Any , SCREAMING_SNAKE_CASE : Tuple=15):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
_A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
# Simple input
_A : Dict = 'This is a simple input'
_A : Dict = ['This is a simple input 1', 'This is a simple input 2']
_A : Any = ('This is a simple input', 'This is a pair')
_A : int = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length')
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length')
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length')
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length')
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' , )
def A ( self : Any):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __lowerCamelCase ( a_ ):
"""simple docstring"""
pass
| 128 | 0 |
"""simple docstring"""
from __future__ import annotations
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> None:
A = order
# a_{0} ... a_{k}
A = [1.0] + [0.0] * order
# b_{0} ... b_{k}
A = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
A = [0.0] * self.order
# y[n-1] ... y[n-k]
A = [0.0] * self.order
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> None:
if len(lowerCamelCase_ ) < self.order:
A = [1.0, *a_coeffs]
if len(lowerCamelCase_ ) != self.order + 1:
A = (
f'Expected a_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(lowerCamelCase_ )}'
)
raise ValueError(lowerCamelCase_ )
if len(lowerCamelCase_ ) != self.order + 1:
A = (
f'Expected b_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(lowerCamelCase_ )}'
)
raise ValueError(lowerCamelCase_ )
A = a_coeffs
A = b_coeffs
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> float:
A = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 ,self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
A = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
A = self.input_history[:-1]
A = self.output_history[:-1]
A = sample
A = result
return result
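# Added usage sketch (both methods above share one obfuscated name, so this
# uses the original set_coefficients/process names and an assumed IIRFilter
# class name): a 2nd-order filter with pass-through coefficients
# a = b = [1, 0, 0] returns each sample unchanged:
#
#   filt = IIRFilter(2)
#   filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
#   print(filt.process(0.5))  # 0.5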
| 714 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCamelCase = Features({'''text''': Value('''string''' )} )
_lowerCamelCase = Features({'''labels''': ClassLabel} )
_lowerCamelCase = "text"
_lowerCamelCase = "labels"
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> int:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] ,lowerCamelCase_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
A = copy.deepcopy(self )
A = self.label_schema.copy()
A = features[self.label_column]
A = label_schema
return task_template
@property
def UpperCamelCase__ ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
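# Added usage sketch (this template is datasets' TextClassification task; the
# method above is `align_with_features` in the original source): aligning with
# a dataset's features swaps the generic ClassLabel for the concrete one:
#
#   from datasets import ClassLabel, Features, Value
#
#   features = Features({"text": Value("string"),
#                        "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)  # label_schema now has 2 names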
| 255 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__UpperCamelCase : Any = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ['''input_features''', '''attention_mask''']
def __init__( self , _snake_case=80 , _snake_case=1_60_00 , _snake_case=80 , _snake_case=0.0 , _snake_case=True , _snake_case=True , _snake_case=True , **_snake_case , ):
"""simple docstring"""
super().__init__(feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , **_snake_case )
lowerCAmelCase = num_mel_bins
lowerCAmelCase = do_ceptral_normalize
lowerCAmelCase = normalize_means
lowerCAmelCase = normalize_vars
lowerCAmelCase = True
def UpperCamelCase__ ( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowerCAmelCase = torch.from_numpy(_snake_case ).unsqueeze(0 )
lowerCAmelCase = ta_kaldi.fbank(_snake_case , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def UpperCamelCase__ ( _snake_case , _snake_case , _snake_case = True , _snake_case = True , _snake_case = 0.0 , ):
"""simple docstring"""
if normalize_means:
lowerCAmelCase = x[:input_length].mean(axis=0 )
lowerCAmelCase = np.subtract(_snake_case , _snake_case )
if normalize_vars:
lowerCAmelCase = x[:input_length].std(axis=0 )
lowerCAmelCase = np.divide(_snake_case , _snake_case )
if input_length < x.shape[0]:
lowerCAmelCase = padding_value
# make sure array is in float32
lowerCAmelCase = x.astype(np.floataa )
return x
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(_snake_case , _snake_case , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(_snake_case , _snake_case )
]
def __call__( self , _snake_case , _snake_case = False , _snake_case = None , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase = isinstance(_snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowerCAmelCase = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray(_snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
lowerCAmelCase = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [raw_speech]
# extract fbank features
lowerCAmelCase = [self._extract_fbank_features(_snake_case ) for waveform in raw_speech]
# convert into correct format for padding
lowerCAmelCase = BatchFeature({'input_features': features} )
lowerCAmelCase = self.pad(
_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
# make sure list is in array format
lowerCAmelCase = padded_inputs.get('input_features' )
if isinstance(input_features[0] , _snake_case ):
lowerCAmelCase = [np.asarray(_snake_case , dtype=np.floataa ) for feature in input_features]
lowerCAmelCase = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowerCAmelCase = [np.asarray(_snake_case , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowerCAmelCase = (
np.array(_snake_case , dtype=np.intaa )
if self._get_padding_strategies(_snake_case , max_length=_snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCAmelCase = self.normalize(
padded_inputs['input_features'] , attention_mask=_snake_case )
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(_snake_case )
return padded_inputs
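# Added usage sketch (this obfuscated class corresponds to transformers'
# Speech2TextFeatureExtractor, whose public checkpoint is used below; the
# audio array is a dummy):
#
#   import numpy as np
#   from transformers import Speech2TextFeatureExtractor
#
#   extractor = Speech2TextFeatureExtractor.from_pretrained(
#       "facebook/s2t-small-librispeech-asr")
#   audio = np.zeros(16000, dtype=np.float32)  # 1 s of 16 kHz silence
#   feats = extractor(audio, sampling_rate=16000, return_tensors="pt")
#   print(feats.input_features.shape)  # (1, num_frames, 80 mel bins)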
| 4 | """simple docstring"""
import os
def lowercase ( ):
"""simple docstring"""
A__ : List[Any] =os.path.dirname(os.path.realpath(UpperCamelCase ) )
A__ : str =os.path.join(UpperCamelCase , "triangle.txt" )
with open(UpperCamelCase ) as f:
A__ : Optional[int] =f.readlines()
A__ : str =[]
for line in triangle:
A__ : Union[str, Any] =[]
for number in line.strip().split(" " ):
numbers_from_line.append(int(UpperCamelCase ) )
a.append(UpperCamelCase )
for i in range(1 , len(UpperCamelCase ) ):
for j in range(len(a[i] ) ):
A__ : Union[str, Any] =a[i - 1][j] if j != len(a[i - 1] ) else 0
A__ : Union[str, Any] =a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(UpperCamelCase , UpperCamelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
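# Added worked example: the loop adds to each cell the larger of its two
# parents, so after the sweep the last row holds best path sums. For the
# classic 4-row triangle the answer is 23:
#
#        3
#       7 4
#      2 4 6
#     8 5 9 3        best path 3 -> 7 -> 4 -> 9 = 23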
| 656 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = DDIMPipeline
__UpperCAmelCase : Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__UpperCAmelCase : Tuple = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
__UpperCAmelCase : str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
__UpperCAmelCase : int = False
def __snake_case ( self : Optional[Any] ) -> Tuple:
torch.manual_seed(0 )
__snake_case : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
__snake_case : Optional[Any] = DDIMScheduler()
__snake_case : int = {'''unet''': unet, '''scheduler''': scheduler}
return components
def __snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Tuple=0 ) -> Dict:
if str(UpperCAmelCase__ ).startswith("mps" ):
__snake_case : int = torch.manual_seed(UpperCAmelCase__ )
else:
__snake_case : Dict = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
__snake_case : List[Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self : int ) -> List[Any]:
__snake_case : Dict = '''cpu'''
__snake_case : Any = self.get_dummy_components()
__snake_case : int = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__snake_case : Any = self.get_dummy_inputs(UpperCAmelCase__ )
__snake_case : Optional[int] = pipe(**UpperCAmelCase__ ).images
__snake_case : Dict = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
__snake_case : Dict = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
__snake_case : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ , 1E-3 )
def __snake_case ( self : Union[str, Any] ) -> Optional[int]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
super().test_save_load_local(expected_max_difference=3E-3 )
def __snake_case ( self : Tuple ) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __snake_case ( self : List[Any] ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Any ) -> int:
__snake_case : List[str] = '''google/ddpm-cifar10-32'''
__snake_case : str = UNetaDModel.from_pretrained(UpperCAmelCase__ )
__snake_case : List[Any] = DDIMScheduler()
__snake_case : Tuple = DDIMPipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
ddim.to(UpperCAmelCase__ )
ddim.set_progress_bar_config(disable=UpperCAmelCase__ )
__snake_case : Optional[int] = torch.manual_seed(0 )
__snake_case : str = ddim(generator=UpperCAmelCase__ , eta=0.0 , output_type="numpy" ).images
__snake_case : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case : int = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self : Dict ) -> Union[str, Any]:
__snake_case : str = '''google/ddpm-ema-bedroom-256'''
__snake_case : Optional[int] = UNetaDModel.from_pretrained(UpperCAmelCase__ )
__snake_case : Optional[int] = DDIMScheduler.from_pretrained(UpperCAmelCase__ )
__snake_case : Tuple = DDIMPipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
ddpm.to(UpperCAmelCase__ )
ddpm.set_progress_bar_config(disable=UpperCAmelCase__ )
__snake_case : Union[str, Any] = torch.manual_seed(0 )
__snake_case : str = ddpm(generator=UpperCAmelCase__ , output_type="numpy" ).images
__snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__snake_case : Optional[Any] = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 718 |
from __future__ import annotations
_snake_case : Union[str, Any] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class a :
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase : dict[str, list[str]] , lowerCamelCase : str ) -> None:
__snake_case : Tuple = graph
# mapping node to its parent in resulting breadth first tree
__snake_case : dict[str, str | None] = {}
__snake_case : Dict = source_vertex
def __snake_case ( self : Optional[int] ) -> None:
__snake_case : Dict = {self.source_vertex}
__snake_case : List[str] = None
__snake_case : Optional[Any] = [self.source_vertex] # first in first out queue
while queue:
__snake_case : List[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase )
__snake_case : Any = vertex
queue.append(lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : str ) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
__snake_case : Optional[Any] = self.parent.get(lowerCamelCase )
if target_vertex_parent is None:
__snake_case : Optional[Any] = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(lowerCamelCase )
return self.shortest_path(lowerCamelCase ) + F'->{target_vertex}'
if __name__ == "__main__":
_snake_case : Optional[Any] = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 203 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[Any] = """audio-spectrogram-transformer"""
def __init__( self , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=16 , snake_case=True , snake_case=10 , snake_case=10 , snake_case=1024 , snake_case=128 , **snake_case , ):
super().__init__(**snake_case )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = patch_size
lowercase = qkv_bias
lowercase = frequency_stride
lowercase = time_stride
lowercase = max_length
lowercase = num_mel_bins
| 84 |
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 0
for ch in input_str:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ord(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = pow(2 , lowerCamelCase_ )
        # If we already turned on the bit for the current character's unicode code point
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
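# Added note and worked example: the function maps every character to one bit
# of an arbitrarily large int via pow(2, ord(ch)) and bails out the moment a
# bit is already set, i.e. it returns True only when all characters are
# distinct:
#
#   "abcde" -> True
#   "hello" -> False   # the second 'l' finds its bit already on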
| 105 | 0 |
class a_ :
def __init__( self :Any , _lowercase :int , _lowercase :List[Any]=None , _lowercase :Tuple=None) -> str:
UpperCAmelCase_ = data
UpperCAmelCase_ = previous
UpperCAmelCase_ = next_node
def __str__( self :Union[str, Any]) -> str:
return f"{self.data}"
def __a ( self :Optional[int]) -> int:
return self.data
def __a ( self :Any) -> int:
return self.next
def __a ( self :Union[str, Any]) -> Optional[int]:
return self.previous
class a_ :
def __init__( self :Union[str, Any] , _lowercase :str) -> str:
UpperCAmelCase_ = head
def __iter__( self :Any) -> List[str]:
return self
def __a ( self :str) -> Dict:
if not self.current:
raise StopIteration
else:
UpperCAmelCase_ = self.current.get_data()
UpperCAmelCase_ = self.current.get_next()
return value
class a_ :
def __init__( self :Union[str, Any]) -> str:
UpperCAmelCase_ = None # First node in list
UpperCAmelCase_ = None # Last node in list
def __str__( self :Optional[Any]) -> Union[str, Any]:
UpperCAmelCase_ = self.head
UpperCAmelCase_ = []
while current is not None:
nodes.append(current.get_data())
UpperCAmelCase_ = current.get_next()
return " ".join(str(_lowercase) for node in nodes)
def __contains__( self :Union[str, Any] , _lowercase :int) -> Any:
UpperCAmelCase_ = self.head
while current:
if current.get_data() == value:
return True
UpperCAmelCase_ = current.get_next()
return False
def __iter__( self :int) -> int:
return LinkedListIterator(self.head)
def __a ( self :Any) -> Any:
if self.head:
return self.head.get_data()
return None
def __a ( self :Optional[Any]) -> Any:
if self.tail:
return self.tail.get_data()
return None
def __a ( self :Union[str, Any] , _lowercase :Node) -> None:
if self.head is None:
UpperCAmelCase_ = node
UpperCAmelCase_ = node
else:
self.insert_before_node(self.head , _lowercase)
def __a ( self :int , _lowercase :Node) -> None:
if self.head is None:
self.set_head(_lowercase)
else:
self.insert_after_node(self.tail , _lowercase)
def __a ( self :Tuple , _lowercase :int) -> None:
UpperCAmelCase_ = Node(_lowercase)
if self.head is None:
self.set_head(_lowercase)
else:
self.set_tail(_lowercase)
def __a ( self :Optional[int] , _lowercase :Node , _lowercase :Node) -> None:
UpperCAmelCase_ = node
UpperCAmelCase_ = node.previous
if node.get_previous() is None:
UpperCAmelCase_ = node_to_insert
else:
UpperCAmelCase_ = node_to_insert
UpperCAmelCase_ = node_to_insert
def __a ( self :Dict , _lowercase :Node , _lowercase :Node) -> None:
UpperCAmelCase_ = node
UpperCAmelCase_ = node.next
if node.get_next() is None:
UpperCAmelCase_ = node_to_insert
else:
UpperCAmelCase_ = node_to_insert
UpperCAmelCase_ = node_to_insert
def __a ( self :Tuple , _lowercase :int , _lowercase :int) -> None:
UpperCAmelCase_ = 1
UpperCAmelCase_ = Node(_lowercase)
UpperCAmelCase_ = self.head
while node:
if current_position == position:
self.insert_before_node(_lowercase , _lowercase)
return
current_position += 1
UpperCAmelCase_ = node.next
self.insert_after_node(self.tail , _lowercase)
def __a ( self :Tuple , _lowercase :int) -> Node:
UpperCAmelCase_ = self.head
while node:
if node.get_data() == item:
return node
UpperCAmelCase_ = node.get_next()
raise Exception('''Node not found''')
def __a ( self :Any , _lowercase :Any) -> Optional[Any]:
if (node := self.get_node(_lowercase)) is not None:
if node == self.head:
UpperCAmelCase_ = self.head.get_next()
if node == self.tail:
UpperCAmelCase_ = self.tail.get_previous()
self.remove_node_pointers(_lowercase)
@staticmethod
def __a ( _lowercase :Node) -> None:
if node.get_next():
UpperCAmelCase_ = node.previous
if node.get_previous():
UpperCAmelCase_ = node.next
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def __a ( self :List[str]) -> Union[str, Any]:
return self.head is None
def A ( ) -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest
    doctest.testmod()
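# Added usage sketch (the three classes above shadow one another under the
# obfuscated name `a_`; the original names Node / LinkedListIterator /
# LinkedList and the method name `insert` are assumed from the source):
#
#   lst = LinkedList()
#   lst.insert(10)     # empty list: node becomes both head and tail
#   lst.insert(20)     # appended after the current tail
#   print(str(lst))    # "10 20"
#   print(20 in lst)   # True — __contains__ walks the nodes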
| 561 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def A ( ) -> Optional[int]:
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
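# Hedged usage sketch of the decorator exercised above (the training-loop body is
# an assumption, not part of this test file): `find_executable_batch_size` injects
# `batch_size` as the first argument and halves it after each CUDA OOM.
#
# @find_executable_batch_size(starting_batch_size=64)
# def training_function(batch_size):
#     # build the dataloader with `batch_size` and run one epoch here
#     print(f"Training succeeded with batch_size={batch_size}")
#
# training_function()  # called with no explicit batch_size argument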
| 561 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
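# Illustrative sketch of shape_list: static dimensions come back as Python ints,
# dynamic (None) dimensions come back as scalar tensors from tf.shape.
# x = tf.keras.Input(shape=(None, 8))  # batch and sequence dims are dynamic
# shape_list(x)  # -> [<dynamic>, <dynamic>, 8]
# shape_list(tf.zeros((2, 3, 8)))  # -> [2, 3, 8]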
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
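# Quick sanity check mirroring torch.flatten semantics (shapes are illustrative):
# t = tf.zeros((2, 3, 4, 5))
# flatten(t).shape                          # TensorShape([120])
# flatten(t, start_dim=1).shape             # TensorShape([2, 60])
# flatten(t, start_dim=1, end_dim=2).shape  # TensorShape([2, 12, 5])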
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
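# Hedged round-trip sketch for the two HDF5 helpers above (assumes h5py is installed;
# the file name is illustrative, not part of this module):
# import h5py
# with h5py.File("/tmp/attrs.h5", "w") as f:
#     grp = f.create_group("layer")
#     save_attributes_to_hdf5_group(grp, "weight_names", ["kernel:0", "bias:0"])
#     assert load_attributes_from_hdf5_group(grp, "weight_names") == ["kernel:0", "bias:0"]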
| 206 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
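# Hedged usage sketch: with the lazy module wired up, the public classes import as usual
# and the heavy modeling code only loads on first access.
# from transformers import MegatronBertConfig, MegatronBertModel
# model = MegatronBertModel(MegatronBertConfig())  # randomly initialized weights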
| 206 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply each (tf_name, hf_name) substitution in order to a checkpoint key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
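# Worked example of the rewriting above (the TF key is a hypothetical checkpoint name):
# rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
# -> "model.decoder.layers.0.self_attn.q_proj.weight"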
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map a dict of TF checkpoint arrays onto a freshly initialized PyTorch model."""
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    """Load every non-ignored variable from a TF checkpoint into a name -> array dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 15 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
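# Hedged usage sketch of the helper under test: passing only `out_indices`
# derives the matching `out_features` from the stage names.
# get_aligned_output_features_output_indices(None, [0, 2], ["a", "b", "c"])
# -> (["a", "c"], [0, 2])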
| 15 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 84 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a) -> None:
    """Sort the list `a` in place; only integer elements are supported."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
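# Hedged note with a tiny worked example: pigeonhole sort runs in O(n + range) time
# and O(range) extra space, so it fits best when the value range is close to n.
# a = [8, 3, 2, 7, 4, 6, 8]
# pigeonhole_sort(a)
# a -> [2, 3, 4, 6, 7, 8, 8]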
| 230 | 0 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self, sql, con, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs, )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class SqlDatasetWriter:
    def __init__(
        self, dataset: Dataset, name: str, con, batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_sql_kwargs, ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                    written += num_rows_batch

        return written
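# Hedged usage sketch (the table name and sqlite path are illustrative):
# from datasets import Dataset
# ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# SqlDatasetWriter(ds, name="my_table", con="sqlite:///my.db").write()
# ds2 = SqlDatasetReader(sql="SELECT * FROM my_table", con="sqlite:///my.db").read()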
| 327 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads the inputs for multiple choice."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`.")
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
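# Hedged invocation sketch (paths and hyper-parameters below are illustrative, not
# values taken from this file):
# python run_swag.py \
#   --model_name_or_path bert-base-uncased \
#   --output_dir /tmp/swag_out \
#   --do_train --do_eval \
#   --per_device_train_batch_size 16 \
#   --learning_rate 5e-5 --num_train_epochs 3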
| 327 | 1 |
def actual_power(a: int, b: int):
    """Divide-and-conquer computation of a ** b for integer a and b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
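# A few worked cases of the fast exponentiation above:
# power(2, 10)  -> 1024
# power(2, -3)  -> 0.125  (negative exponents go through the 1/actual_power branch)
# power(5, 0)   -> 1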
| 524 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table() -> pd.DataFrame:
    """Generate a dataframe with the supported backends (PyTorch/TF/Flax) for each model type."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """
    Update the metadata for the Transformers repo.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that every registered pipeline task appears in `PIPELINE_TAGS_AND_AUTO_MODELS`."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 524 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with jieba, then tokenization with SentencePiece (used by CPM models)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self) -> int:
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload it on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string, re-splitting pieces that end with a digit followed by a comma."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # XLNet-style layout: sequence (pair) followed by <sep> and a trailing <cls>.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        # Undo the jieba pre-tokenization markers: \u2582 -> space, \u2583 -> newline.
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
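# Illustrative usage (not part of the original module). `from_pretrained` downloads the
# real SentencePiece model, so this is left as a commented sketch:
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer("今天天气真好!")["input_ids"]   # jieba pre-segmentation + SentencePiece
#   tokenizer.decode(ids)                            # _decode() restores spaces/newlines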
| 701 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Guess the evaluation model type from the checkpoint name."""
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
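# Illustrative behaviour (not part of the original script):
#   infer_model_type("facebook/rag-token-nq")     -> "rag_token"
#   infer_model_type("facebook/rag-sequence-nq")  -> "rag_sequence"
#   infer_model_type("facebook/bart-large")       -> "bart"
#   infer_model_type("t5-small")                  -> None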
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Score the prediction against every gold answer and keep the best value.
    return max(metric_fn(prediction, gt) for gt in ground_truths)
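# Illustrative example (not part of the original script): the SQuAD-style metrics accept
# several gold answers per question, and only the best match counts, e.g.
#   metric_max_over_ground_truths(exact_match_score, "new york", ["New York", "NYC"])
# returns 1, because the prediction matches "New York" after answer normalization.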
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true"
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating."
    )
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retrieved while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
| 18 | 0 |
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """Adaptive softmax head: a small head cluster plus progressively cheaper tail clusters."""

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        # Optionally project `x` down to the cluster's embedding size, then apply the linear layer.
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        # Pick, for every position, the log-probability of its target token.
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
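# Illustrative construction (not part of the original module); the values below mirror the
# Transformer-XL WikiText-103 setup, where a 267k-token vocabulary is split into a head and
# three tail clusters with progressively smaller embeddings (div_val=4):
#
#   softmax = TFAdaptiveSoftmaxMask(
#       vocab_size=267_735, d_embed=1_024, d_proj=1_024, cutoffs=[20_000, 40_000, 200_000], div_val=4
#   )
#   # log_probs = softmax(hidden, target)  # hidden: [len, bsz, d_proj], target: [len, bsz]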
| 102 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290 | 0 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for the amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
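# Worked example (not part of the original module): borrowing 25,000 at 8% interest for
# 10 years gives rate_per_month = 0.08 / 12 and 120 monthly payments, so
#   equated_monthly_installments(25_000, 0.08, 10)  # -> ~303.32 per month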
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of `PriorTransformer`: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """A prior transformer that predicts CLIP image embeddings from CLIP text embeddings."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""Returns a dict of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""Sets the attention processor to use: one processor for all layers, or a dict keyed by weight name."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        # Un-normalize the prior latents back to CLIP embedding space.
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
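# Illustrative construction (not part of the original module); the hyperparameters are the
# defaults above, not those of any particular released checkpoint:
#
#   prior = PriorTransformer(num_attention_heads=32, attention_head_dim=64, num_layers=20)
#   # out = prior(hidden_states, timestep=10, proj_embedding=text_emb,
#   #             encoder_hidden_states=text_enc_states).predicted_image_embedding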
| 23 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums every node value of a binary tree via depth-first search."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
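# Illustrative usage (not part of the original module):
#   tree = Node(10)
#   tree.left, tree.right = Node(5), Node(-3)
#   sum(BinaryTreeNodeSum(tree))  # -> 12, i.e. 10 + 5 + (-3)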
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284 | 0 |
"""simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> bool:
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> bool:
# Base Case
if curr_ind == len(__SCREAMING_SNAKE_CASE ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__SCREAMING_SNAKE_CASE ) ):
if valid_connection(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Insert current vertex into path as next transition
_SCREAMING_SNAKE_CASE : Dict = next_ver
# Validate created path
if util_hamilton_cycle(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , curr_ind + 1 ):
return True
# Backtrack
_SCREAMING_SNAKE_CASE : int = -1
return False
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0 )-> list[int]:
_SCREAMING_SNAKE_CASE : Tuple = [-1] * (len(__SCREAMING_SNAKE_CASE ) + 1)
# initialize start and end of path with starting index
_SCREAMING_SNAKE_CASE : int = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 ) else []
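# Illustrative usage (not part of the original module): a 4-cycle plus one chord.
#   graph = [
#       [0, 1, 0, 1],
#       [1, 0, 1, 1],
#       [0, 1, 0, 1],
#       [1, 1, 1, 0],
#   ]
#   hamilton_cycle(graph)  # -> [0, 1, 2, 3, 0]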
| 635 | """simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
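# Illustrative input (not part of the original script): each line of --correct_filename is
# "file;class;test;correct_line", e.g.
#
#   tests/models/vit/test_modeling_vit.py;ViTModelIntegrationTest;test_inference;expected_slice = torch.tensor([...])
#
# and --fail_filename, when given, lists failing tests as "file::class::test" so that only
# those get overwritten.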
| 635 | 1 |
"""Topological sort on a directed acyclic graph."""

#     a
#    / \
#   b    c
#  / \
# d   e
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Perform a depth-first topological sort starting from `start`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
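# For the example graph above this prints ['c', 'd', 'e', 'b', 'a']: every node appears
# after all of its descendants. Reverse the list if dependencies-last order is needed.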
| 50 |
"""GPTBigCode configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration for GPTBigCode, a GPT-2 variant with optional multi-query attention."""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
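# Illustrative usage (not part of the original module):
#   config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)  # a tiny config for testing
#   config.multi_query  # True by default: one shared key/value head (multi-query attention)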
| 50 | 1 |
"""simple docstring"""
import cmath
import math
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->complex:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = math.radians(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = math.radians(UpperCAmelCase_ )
# Convert voltage and current to rectangular form
__UpperCAmelCase : Dict = cmath.rect(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : Optional[int] = cmath.rect(UpperCAmelCase_ , UpperCAmelCase_ )
# Calculate apparent power
return voltage_rect * current_rect
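# Worked example (not part of the original module): with a 30 degree phase shift between
# a 100 V source and a 5 A load, the apparent power is
#   apparent_power(100, 5, 30, 0)  # -> approximately (433.0 + 250j) volt-amperes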
if __name__ == "__main__":
import doctest
doctest.testmod() | 707 |
"""simple docstring"""
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->Tuple:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_=0 ) ->Dict:
"""simple docstring"""
return sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : x[column] )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=float('''inf''' ) ) ->str:
"""simple docstring"""
for i in range(points_counts - 1 ):
for j in range(i + 1 , UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCAmelCase : Tuple = current_dis
return min_dis
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=float('''inf''' ) ) ->str:
"""simple docstring"""
for i in range(min(6 , points_counts - 1 ) , UpperCAmelCase_ ):
for j in range(max(0 , i - 6 ) , UpperCAmelCase_ ):
__UpperCAmelCase : Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCAmelCase : Tuple = current_dis
return min_dis
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Any:
"""simple docstring"""
if points_counts <= 3:
return dis_between_closest_pair(UpperCAmelCase_ , UpperCAmelCase_ )
# recursion
__UpperCAmelCase : Any = points_counts // 2
__UpperCAmelCase : Any = closest_pair_of_points_sqr(
UpperCAmelCase_ , points_sorted_on_y[:mid] , UpperCAmelCase_ )
__UpperCAmelCase : Tuple = closest_pair_of_points_sqr(
UpperCAmelCase_ , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCAmelCase : List[Any] = min(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : int = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = dis_between_closest_in_strip(
UpperCAmelCase_ , len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return min(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->List[Any]:
"""simple docstring"""
__UpperCAmelCase : str = column_based_sort(UpperCAmelCase_ , column=0 )
__UpperCAmelCase : Any = column_based_sort(UpperCAmelCase_ , column=1 )
return (
closest_pair_of_points_sqr(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
) ** 0.5
if __name__ == "__main__":
lowercase__ :Optional[Any] = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points))) | 374 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
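# Illustrative effect (not part of the original script): a checkpoint key such as
#   "image_encoder.blocks.0.norm1.weight"
# becomes
#   "vision_encoder.layers.0.layer_norm1.weight"
# after the substring renames above, and the hypernetwork MLP layers 0/1/2 are remapped
# to proj_in / layers.0 / proj_out.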
def lowerCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict="ybelkada/segment-anything" ) -> Dict:
'''simple docstring'''
A = hf_hub_download(lowerCAmelCase__ , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
A = SamConfig()
elif "sam_vit_l" in model_name:
A = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
A = SamConfig(
vision_config=lowerCAmelCase__ , )
elif "sam_vit_h" in model_name:
A = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
A = SamConfig(
vision_config=lowerCAmelCase__ , )
A = torch.load(lowerCAmelCase__ , map_location='cpu' )
A = replace_keys(lowerCAmelCase__ )
A = SamImageProcessor()
A = SamProcessor(image_processor=lowerCAmelCase__ )
A = SamModel(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
A = hf_model.to('cuda' )
A = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
A = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert('RGB' )
A = [[[400, 650]]]
A = [[1]]
A = processor(images=np.array(lowerCAmelCase__ ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
A = hf_model(**lowerCAmelCase__ )
A = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579890251159668
A = processor(
images=np.array(lowerCAmelCase__ ) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
A = hf_model(**lowerCAmelCase__ )
A = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712603092193604
A = ((75, 275, 1725, 850),)
A = processor(images=np.array(lowerCAmelCase__ ) , input_boxes=lowerCAmelCase__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
A = hf_model(**lowerCAmelCase__ )
A = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686015605926514
# Test with 2 points and 1 image.
A = [[[400, 650], [800, 650]]]
A = [[1, 1]]
A = processor(
images=np.array(lowerCAmelCase__ ) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
A = hf_model(**lowerCAmelCase__ )
A = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936047792434692
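# Example invocation (illustrative; the script filename is an assumption, but the flag
# names come from the parser below):
#   python convert_sam_to_hf.py --model_name sam_vit_h_4b8939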
if __name__ == "__main__":
__snake_case :int =argparse.ArgumentParser()
__snake_case :str =['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id) | 106 |
import re
def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence (https://en.wikipedia.org/wiki/DNA)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod() | 106 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 82 | from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
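One optional hardening of the downloader above (not in the original snippet) is to fail fast on HTTP errors before parsing the response:

response = requests.get(url)
response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx instead of parsing an error page
soup = BeautifulSoup(response.content, "html.parser")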
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self ):
'''simple docstring'''
A__ = 0
A__ = 0
A__ = {}
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if vertex not in self.adjacency:
A__ = {}
self.num_vertices += 1
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
self.add_vertex(UpperCamelCase__ )
self.add_vertex(UpperCamelCase__ )
if head == tail:
return
A__ = weight
A__ = weight
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_edges()
for edge in edges:
A__ , A__ , A__ = edge
edges.remove((tail, head, weight) )
for i in range(len(UpperCamelCase__ ) ):
A__ = list(edges[i] )
edges.sort(key=lambda UpperCamelCase__ : e[2] )
for i in range(len(UpperCamelCase__ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
A__ = edges[i][2] + 1
for edge in edges:
A__ , A__ , A__ = edge
A__ = weight
A__ = weight
def __str__( self ):
'''simple docstring'''
A__ = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
A__ = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip("\n" )
def lowercase_ ( self ):
'''simple docstring'''
A__ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def lowercase_ ( self ):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def lowercase_ ( UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
A__ = Graph()
if vertices is None:
A__ = []
if edges is None:
A__ = []
for vertex in vertices:
g.add_vertex(UpperCamelCase__ )
for edge in edges:
g.add_edge(*UpperCamelCase__ )
return g
class lowerCAmelCase__ :
def __init__( self ):
'''simple docstring'''
A__ = {}
A__ = {}
def __len__( self ):
'''simple docstring'''
return len(self.parent )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if item in self.parent:
return self.find(UpperCamelCase__ )
A__ = item
A__ = 0
return item
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if item not in self.parent:
return self.make_set(UpperCamelCase__ )
if item != self.parent[item]:
A__ = self.find(self.parent[item] )
return self.parent[item]
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.find(UpperCamelCase__ )
A__ = self.find(UpperCamelCase__ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
A__ = roota
return roota
if self.rank[roota] < self.rank[roota]:
A__ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
A__ = roota
return roota
return None
@staticmethod
def lowercase_ ( UpperCamelCase__ ):
'''simple docstring'''
A__ = graph.num_vertices
A__ = Graph.UnionFind()
A__ = []
while num_components > 1:
A__ = {}
for vertex in graph.get_vertices():
A__ = -1
A__ = graph.get_edges()
for edge in edges:
A__ , A__ , A__ = edge
edges.remove((tail, head, weight) )
for edge in edges:
A__ , A__ , A__ = edge
A__ = union_find.find(UpperCamelCase__ )
A__ = union_find.find(UpperCamelCase__ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A__ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A__ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
A__ , A__ , A__ = cheap_edge[vertex]
if union_find.find(UpperCamelCase__ ) != union_find.find(UpperCamelCase__ ):
union_find.union(UpperCamelCase__ , UpperCamelCase__ )
mst_edges.append(cheap_edge[vertex] )
A__ = num_components - 1
A__ = Graph.build(edges=UpperCamelCase__ )
return mst | 337 |
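A minimal usage sketch for the Graph/Borůvka code above; the sample graph is illustrative:

# Build a small weighted graph, make the weights distinct, then extract the MST.
g = Graph.build(vertices=[1, 2, 3, 4], edges=[[1, 2, 1], [2, 3, 2], [3, 4, 1], [4, 1, 3]])
g.distinct_weight()
mst = Graph.boruvka_mst(g)
print(mst)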
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) | 337 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    """Translates text between the languages listed in `LANGUAGE_CODES` using an NLLB-200 checkpoint."""

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 712 |
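A usage sketch for the tool above (hypothetical; it assumes a transformers version that ships this tools API):

tool = TranslationTool()
print(tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))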
import importlib
import os
import sys
# This is required to make the module imports work (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Turn a model test file path into the corresponding module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module defined in `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all model tester classes defined in `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes (those with a non-empty `all_model_classes`) in `test_file`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by `test_class`, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that test `model_class`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the model tester classes in `test_file` that are used to test `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to the model tester classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively convert classes to their names so the structure is JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 341 | 0 |
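For reference, a hypothetical invocation of the helpers above; the test file path is just an example:

test_file = "tests/models/bert/test_modeling_bert.py"
print(get_module_path(test_file))  # -> "tests.models.bert.test_modeling_bert"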
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC 4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC 4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 634 |
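A round-trip check of the implementation above against the standard library (illustrative):

import base64

payload = b"Hello, World!"
assert base64_encode(payload) == base64.b64encode(payload)
assert base64_decode(base64_encode(payload)) == payload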
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return one representative linear layer of the model, for weight-class checks."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a LoRA-like adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    MAX_NEW_TOKENS = 10
    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_abit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()

        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_abit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fpaa = self.model_fpaa.to(torch.float32)
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu")

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 634 | 1 |
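A minimal 4-bit loading sketch mirroring the tests above; the model id is just an example:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=quantization_config, device_map="auto"
)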
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPretraining has random masking, we need to fix the noise
    # to generate masks during test
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase : Tuple = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(lowerCamelCase )
UpperCamelCase : str = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : int = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase : List[str] = ViTMAEConfig()
UpperCamelCase : Tuple = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase : List[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(**lowerCamelCase , noise=torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) )
# verify the logits
UpperCamelCase : Any = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
UpperCamelCase : Optional[int] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase ) , atol=1e-4 ) )
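if __name__ == "__main__":
    # Minimal illustrative sketch (not part of the test classes above): reusing a
    # fixed noise vector, exactly as the integration test does, makes ViTMAE
    # inference deterministic. The checkpoint name matches the one used above.
    np.random.seed(2)
    demo_model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
    demo_processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
    demo_inputs = demo_processor(images=prepare_img(), return_tensors="pt")
    demo_config = ViTMAEConfig()
    demo_num_patches = int((demo_config.image_size // demo_config.patch_size) ** 2)
    demo_noise = torch.from_numpy(np.random.uniform(size=(1, demo_num_patches)))
    with torch.no_grad():
        first = demo_model(**demo_inputs, noise=demo_noise).logits
        second = demo_model(**demo_inputs, noise=demo_noise).logits
    assert torch.allclose(first, second)  # identical noise -> identical mask -> identical logits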
| 435 |
'''simple docstring'''
lowerCAmelCase_ = 0 # The first color of the flag.
lowerCAmelCase_ = 1 # The second color of the flag.
lowerCAmelCase_ = 2 # The third color of the flag.
lowerCAmelCase_ = (red, white, blue)
def A__ ( A : list):
'''simple docstring'''
if not sequence:
return []
if len(A) == 1:
return list(A)
UpperCamelCase : Optional[int] = 0
UpperCamelCase : Any = len(A) - 1
UpperCamelCase : Union[str, Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
high -= 1
else:
            msg = F'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg)
return sequence
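# Illustrative behaviour (0 = red, 1 = white, 2 = blue):
#     dutch_national_flag_sort([2, 1, 0, 0, 2, 1]) -> [0, 0, 1, 1, 2, 2]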
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = input('Enter numbers separated by commas:\n').strip()
lowerCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 435 | 1 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( SequenceFeatureExtractor ):
"""simple docstring"""
lowerCAmelCase_ = ['''input_values''', '''attention_mask''']
def __init__( self : Tuple , _A : int = 1 , _A : int = 1_6000 , _A : float = 0.0 , _A : bool = False , _A : int = 80 , _A : int = 16 , _A : int = 64 , _A : str = "hann_window" , _A : float = 1.0 , _A : float = 80 , _A : float = 7600 , _A : float = 1e-10 , _A : int = 2 , _A : bool = True , **_A : Optional[Any] , ):
"""simple docstring"""
super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = do_normalize
__SCREAMING_SNAKE_CASE : Optional[Any] = return_attention_mask
__SCREAMING_SNAKE_CASE : Optional[Any] = num_mel_bins
__SCREAMING_SNAKE_CASE : Dict = hop_length
__SCREAMING_SNAKE_CASE : Any = win_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = win_function
__SCREAMING_SNAKE_CASE : str = frame_signal_scale
__SCREAMING_SNAKE_CASE : Tuple = fmin
__SCREAMING_SNAKE_CASE : Any = fmax
__SCREAMING_SNAKE_CASE : Dict = mel_floor
__SCREAMING_SNAKE_CASE : Union[str, Any] = reduction_factor
__SCREAMING_SNAKE_CASE : List[str] = win_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : List[Any] = hop_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : Union[str, Any] = optimal_fft_length(self.sample_size )
__SCREAMING_SNAKE_CASE : str = (self.n_fft // 2) + 1
__SCREAMING_SNAKE_CASE : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function , periodic=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
            __SCREAMING_SNAKE_CASE : Optional[int] = np.array(_A , np.int32 )
__SCREAMING_SNAKE_CASE : List[Any] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
__SCREAMING_SNAKE_CASE : Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__SCREAMING_SNAKE_CASE : Any = padding_value
normed_input_values.append(_A )
else:
__SCREAMING_SNAKE_CASE : int = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase__ ( self : Any , _A : np.ndarray , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = spectrogram(
_A , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self : Dict , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , **_A : str , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
__SCREAMING_SNAKE_CASE : str = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if audio_target is not None:
__SCREAMING_SNAKE_CASE : List[Any] = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
if inputs is None:
return inputs_target
else:
__SCREAMING_SNAKE_CASE : str = inputs_target['''input_values''']
__SCREAMING_SNAKE_CASE : Dict = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
__SCREAMING_SNAKE_CASE : Tuple = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self : Tuple , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = False , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , **_A : str , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = isinstance(_A , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            __SCREAMING_SNAKE_CASE : Tuple = [np.asarray(_A , dtype=np.float32 ) for speech in speech]
        elif not is_batched and not isinstance(_A , np.ndarray ):
            __SCREAMING_SNAKE_CASE : Any = np.asarray(_A , dtype=np.float32 )
        elif isinstance(_A , np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
            __SCREAMING_SNAKE_CASE : Tuple = speech.astype(np.float32 )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE : Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_size
# convert into correct format for padding
if is_target:
__SCREAMING_SNAKE_CASE : Tuple = [self._extract_mel_features(_A ) for waveform in speech]
__SCREAMING_SNAKE_CASE : Tuple = BatchFeature({'''input_values''': features} )
__SCREAMING_SNAKE_CASE : Any = self.num_mel_bins
else:
__SCREAMING_SNAKE_CASE : Dict = BatchFeature({'''input_values''': speech} )
__SCREAMING_SNAKE_CASE : Dict = self.pad(
_A , padding=_A , max_length=_A , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=_A , **_A , )
__SCREAMING_SNAKE_CASE : List[Any] = feature_size_hack
# convert input values to correct format
__SCREAMING_SNAKE_CASE : str = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
            __SCREAMING_SNAKE_CASE : Any = [np.asarray(_A , dtype=np.float32 ) for array in input_values]
        elif (
            not isinstance(_A , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.float64 )
        ):
            __SCREAMING_SNAKE_CASE : List[Any] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(_A , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
            __SCREAMING_SNAKE_CASE : Any = input_values.astype(np.float32 )
# convert attention_mask to correct format
__SCREAMING_SNAKE_CASE : List[str] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
            __SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(_A , dtype=np.int32 ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__SCREAMING_SNAKE_CASE : Optional[Any] = (
attention_mask
if self._get_padding_strategies(_A , max_length=_A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__SCREAMING_SNAKE_CASE : List[str] = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=_A , padding_value=self.padding_value )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE : str = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__SCREAMING_SNAKE_CASE : int = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
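# Minimal illustrative sketch, assuming the class above exposes the usual
# SpeechT5-style feature-extractor call signature (audio first, then keyword
# options); the printed shape is what the raw-audio path would produce:
#
#     import numpy as np
#     extractor = __UpperCamelCase()                      # default 16 kHz settings
#     waveform = np.zeros(16000, dtype=np.float32)        # one second of silence
#     inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
#     print(inputs["input_values"].shape)                 # (1, 16000)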
| 74 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 495 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
A : Tuple = "docs/source/en/_toctree.yml"
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = defaultdict(UpperCamelCase__ )
__lowerCAmelCase = []
__lowerCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(UpperCamelCase__ )
__lowerCAmelCase = new_doc_list
__lowerCAmelCase = [key for key, value in counts.items() if value > 1]
__lowerCAmelCase = []
for duplicate_key in duplicates:
__lowerCAmelCase = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(UpperCamelCase__ ) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
__lowerCAmelCase = sorted(UpperCamelCase__ , key=lambda _UpperCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCamelCase__ ) > 1:
raise ValueError("{doc_list} has two \'overview\' docs which is not allowed." )
overview_doc.extend(UpperCamelCase__ )
# Sort
return overview_doc
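# Illustrative behaviour of clean_doc_toc (entry names are made up): duplicate
# "local" keys are merged, entries are sorted by title, and an "Overview"
# entry is kept first:
#
#     clean_doc_toc([
#         {"local": "b", "title": "Beta"},
#         {"local": "a", "title": "Overview"},
#         {"local": "b", "title": "Beta"},
#     ]) -> [{"local": "a", "title": "Overview"}, {"local": "b", "title": "Beta"}]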
def _lowerCamelCase ( _UpperCamelCase=False ):
'''simple docstring'''
with open(UpperCamelCase__ , encoding="utf-8" ) as f:
__lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
__lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__lowerCAmelCase = content[api_idx]["sections"]
# Then to the model doc
__lowerCAmelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__lowerCAmelCase = api_doc[scheduler_idx]["sections"]
__lowerCAmelCase = clean_doc_toc(UpperCamelCase__ )
__lowerCAmelCase = False
if new_scheduler_doc != scheduler_doc:
__lowerCAmelCase = True
if overwrite:
__lowerCAmelCase = new_scheduler_doc
if diff:
if overwrite:
__lowerCAmelCase = api_doc
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def _lowerCamelCase ( _UpperCamelCase=False ):
'''simple docstring'''
with open(UpperCamelCase__ , encoding="utf-8" ) as f:
__lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
__lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__lowerCAmelCase = content[api_idx]["sections"]
# Then to the model doc
__lowerCAmelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__lowerCAmelCase = False
__lowerCAmelCase = api_doc[pipeline_idx]["sections"]
__lowerCAmelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__lowerCAmelCase = pipeline_doc["section"]
__lowerCAmelCase = clean_doc_toc(UpperCamelCase__ )
if overwrite:
__lowerCAmelCase = new_sub_pipeline_doc
new_pipeline_docs.append(UpperCamelCase__ )
# sort overall pipeline doc
__lowerCAmelCase = clean_doc_toc(UpperCamelCase__ )
if new_pipeline_docs != pipeline_docs:
__lowerCAmelCase = True
if overwrite:
__lowerCAmelCase = new_pipeline_docs
if diff:
if overwrite:
__lowerCAmelCase = api_doc
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
A : Tuple = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 703 |
"""simple docstring"""
import cmath
import math
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = math.radians(_UpperCamelCase )
__lowerCAmelCase = math.radians(_UpperCamelCase )
# Convert voltage and current to rectangular form
__lowerCAmelCase = cmath.rect(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = cmath.rect(_UpperCamelCase , _UpperCamelCase )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
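    # Worked example (illustrative), assuming the function above computes the
    # usual complex apparent power S = V * I with angles given in degrees:
    # a 230 V source at 0 deg driving a 5 A load at -90 deg gives
    #     cmath.rect(230, 0) * cmath.rect(5, math.radians(-90)) ~= 0 - 1150j
    # i.e. a purely reactive apparent power of 1150 VA.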
| 282 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = 0
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : int = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : List[Any] = Path(_lowercase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Path(_lowercase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Any = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int = CLIPConfig()
            # Create a dummy config file with image_processor_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Path(_lowercase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE__ : str = AutoImageProcessor.from_pretrained(_lowercase ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE__ : List[str] = CLIPImageProcessor(**_lowercase )
# save in new folder
model_config.save_pretrained(_lowercase )
config.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoImageProcessor.from_pretrained(_lowercase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE__ : List[str] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] = Path(_lowercase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
_lowercase , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained('''clip-base''' )
def lowercase__ ( self : str ):
with self.assertRaisesRegex(
_lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoImageProcessor.from_pretrained(_lowercase , revision='''aaaaaa''' )
def lowercase__ ( self : List[str] ):
with self.assertRaisesRegex(
_lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase__ ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = AutoImageProcessor.from_pretrained(_lowercase , trust_remote_code=_lowercase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def lowercase__ ( self : List[str] ):
try:
AutoConfig.register('''custom''' , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoImageProcessor.register(_lowercase , _lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : int = Path(_lowercase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomImageProcessor.from_pretrained(_lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : Optional[Any] ):
        class NewImageProcessor ( CustomImageProcessor ):
            is_local = True
try:
AutoConfig.register('''custom''' , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ : Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_lowercase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 35 |
'''simple docstring'''
def lowerCamelCase__ ( a ):
__snake_case = int(a )
if n_element < 1:
__snake_case = ValueError('a should be a positive number' )
raise my_error
__snake_case = [1]
__snake_case , __snake_case , __snake_case = (0, 0, 0)
__snake_case = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
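# Illustrative behaviour: the first ten Hamming (5-smooth) numbers
#     hamming(10) -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]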
if __name__ == "__main__":
_lowercase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
_lowercase = hamming(int(n))
print("""-----------------------------------------------------""")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("""-----------------------------------------------------""")
| 356 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase : str = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
lowerCAmelCase : Union[str, Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Any = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
_lowerCamelCase : int = bs[:]
_lowerCamelCase : List[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__A )
cs.append(2**8 + n )
n += 1
_lowerCamelCase : Any = [chr(__A ) for n in cs]
return dict(zip(__A , __A ) )
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Any = set()
_lowerCamelCase : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCamelCase : str = char
return pairs
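# Illustrative behaviour on a tuple of symbols:
#     get_pairs(("h", "e", "l", "l", "o")) -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}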
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['input_ids', 'attention_mask']
def __init__( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[Any]="replace" , _UpperCamelCase : Tuple="<s>" , _UpperCamelCase : Optional[int]="</s>" , _UpperCamelCase : Any="</s>" , _UpperCamelCase : Dict="<s>" , _UpperCamelCase : str="<unk>" , _UpperCamelCase : str="<pad>" , _UpperCamelCase : List[str]="<mask>" , _UpperCamelCase : Tuple=False , **_UpperCamelCase : int , ) ->Dict:
"""simple docstring"""
_lowerCamelCase : str = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else bos_token
_lowerCamelCase : List[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else eos_token
_lowerCamelCase : int = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else sep_token
_lowerCamelCase : Any = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else cls_token
_lowerCamelCase : Optional[int] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else unk_token
_lowerCamelCase : List[str] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase : int = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else mask_token
super().__init__(
errors=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle:
_lowerCamelCase : Any = json.load(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
_lowerCamelCase : int = errors # how to handle errors in decoding
_lowerCamelCase : Any = bytes_to_unicode()
_lowerCamelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCamelCase , encoding="""utf-8""") as merges_handle:
_lowerCamelCase : Optional[int] = merges_handle.read().split("""\n""")[1:-1]
_lowerCamelCase : Optional[int] = [tuple(merge.split()) for merge in bpe_merges]
_lowerCamelCase : int = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCamelCase : Optional[Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
"""simple docstring"""
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str) ->List[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_lowerCamelCase : List[str] = tuple(_UpperCamelCase)
_lowerCamelCase : str = get_pairs(_UpperCamelCase)
if not pairs:
return token
while True:
_lowerCamelCase : int = min(_UpperCamelCase , key=lambda _UpperCamelCase: self.bpe_ranks.get(_UpperCamelCase , float("""inf""")))
if bigram not in self.bpe_ranks:
break
_lowerCamelCase : Optional[Any] = bigram
_lowerCamelCase : int = []
_lowerCamelCase : Any = 0
while i < len(_UpperCamelCase):
try:
_lowerCamelCase : Union[str, Any] = word.index(_UpperCamelCase , _UpperCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_lowerCamelCase : Optional[int] = j
if word[i] == first and i < len(_UpperCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_lowerCamelCase : Optional[Any] = tuple(_UpperCamelCase)
_lowerCamelCase : List[Any] = new_word
if len(_UpperCamelCase) == 1:
break
else:
_lowerCamelCase : Dict = get_pairs(_UpperCamelCase)
_lowerCamelCase : List[Any] = """ """.join(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Any) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : int = []
for token in re.findall(self.pat , _UpperCamelCase):
_lowerCamelCase : Optional[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCamelCase).split(""" """))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Union[str, Any]) ->List[Any]:
"""simple docstring"""
return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : List[Any]) ->List[Any]:
"""simple docstring"""
return self.decoder.get(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Dict) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = """""".join(_UpperCamelCase)
_lowerCamelCase : List[Any] = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
_lowerCamelCase : Dict = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
_lowerCamelCase : Any = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""")
_lowerCamelCase : Union[str, Any] = 0
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as writer:
writer.write("""#version: 0.2\n""")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""")
_lowerCamelCase : Dict = token_index
writer.write(""" """.join(_UpperCamelCase) + """\n""")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
_lowerCamelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase)
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase)) + [1]
return [1] + ([0] * len(_UpperCamelCase)) + [1, 1] + ([0] * len(_UpperCamelCase)) + [1]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]:
"""simple docstring"""
_lowerCamelCase : Dict = [self.sep_token_id]
_lowerCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=False , **_UpperCamelCase : str) ->int:
"""simple docstring"""
_lowerCamelCase : str = kwargs.pop("""add_prefix_space""" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(_UpperCamelCase) > 0 and not text[0].isspace()):
_lowerCamelCase : Dict = """ """ + text
return (text, kwargs)
| 704 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A ):
'''simple docstring'''
if not sequence:
return []
if len(__A ) == 1:
return list(__A )
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = len(__A ) - 1
_lowerCamelCase : str = 0
while mid <= high:
if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
high -= 1
else:
_lowerCamelCase : int = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(__A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def lowerCAmelCase_ ( __a=None ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: str =argparse.ArgumentParser(add_help=__a , allow_abbrev=__a )
# The main config parser
lowerCamelCase__: Any =config_command_parser(__a )
# The subparser to add commands to
lowerCamelCase__: int =config_parser.add_subparsers(title="subcommands" , dest="subcommand" )
# Then add other parsers with the parent parser
default_command_parser(__a , parents=[parent_parser] )
update_command_parser(__a , parents=[parent_parser] )
return config_parser
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
lowerCamelCase__: Optional[int] =get_config_parser()
lowerCamelCase__: Dict =config_parser.parse_args()
if not hasattr(__a , "func" ):
config_parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
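# Illustrative invocations, assuming this module backs the upstream
# `accelerate config` command:
#     accelerate config             # interactive questionnaire
#     accelerate config default     # write a default config file
#     accelerate config update      # bring an existing config file up to date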
| 59 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : int = 384
if "tiny" in model_name:
_lowercase : Tuple = [3, 3, 9, 3]
_lowercase : List[str] = [96, 192, 384, 768]
if "small" in model_name:
_lowercase : List[str] = [3, 3, 27, 3]
_lowercase : Union[str, Any] = [96, 192, 384, 768]
if "base" in model_name:
_lowercase : List[Any] = [3, 3, 27, 3]
_lowercase : Dict = [128, 256, 512, 1_024]
_lowercase : Optional[int] = 512
if "large" in model_name:
_lowercase : List[str] = [3, 3, 27, 3]
_lowercase : List[Any] = [192, 384, 768, 1_536]
_lowercase : Tuple = 768
if "xlarge" in model_name:
_lowercase : str = [3, 3, 27, 3]
_lowercase : List[str] = [256, 512, 1_024, 2_048]
_lowercase : Tuple = 1_024
# set label information
_lowercase : Dict = 150
_lowercase : Union[str, Any] = 'huggingface/label-files'
_lowercase : str = 'ade20k-id2label.json'
_lowercase : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
    _lowercase : Dict = {int(k): v for k, v in id2label.items()}
    _lowercase : Tuple = {v: k for k, v in id2label.items()}
_lowercase : List[str] = ConvNextConfig(
depths=SCREAMING_SNAKE_CASE , hidden_sizes=SCREAMING_SNAKE_CASE , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    _lowercase : Union[str, Any] = UperNetConfig(
        backbone_config=SCREAMING_SNAKE_CASE , auxiliary_in_channels=SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , id2label=SCREAMING_SNAKE_CASE , label2id=SCREAMING_SNAKE_CASE , )
return config
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Any = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : Any = dct.pop(SCREAMING_SNAKE_CASE )
_lowercase : Any = val
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : List[Any] = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
_lowercase : Optional[int] = model_name_to_url[model_name]
_lowercase : str = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['state_dict']
_lowercase : Optional[int] = get_upernet_config(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowercase : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE )
if "bn" in key:
_lowercase : Any = key.replace('bn' , 'batch_norm' )
_lowercase : Any = val
# rename keys
_lowercase : int = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# verify on image
    _lowercase : Union[str, Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_lowercase : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
_lowercase : Tuple = SegformerImageProcessor()
_lowercase : Tuple = processor(SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
with torch.no_grad():
_lowercase : Dict = model(SCREAMING_SNAKE_CASE )
if model_name == "upernet-convnext-tiny":
_lowercase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_lowercase : Union[str, Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_lowercase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_lowercase : Optional[int] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_lowercase : str = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
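# Illustrative invocation (script name and output path are placeholders; the
# flags match the argument parser above):
#     python convert_upernet_convnext_to_pytorch.py \
#         --model_name upernet-convnext-tiny \
#         --pytorch_dump_folder_path ./upernet-convnext-tiny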
| 66 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def _A ( lowerCAmelCase_ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {}
lowerCAmelCase__ = job['''started_at''']
lowerCAmelCase__ = job['''completed_at''']
lowerCAmelCase__ = date_parser.parse(a_ )
lowerCAmelCase__ = date_parser.parse(a_ )
lowerCAmelCase__ = round((end_datetime - start_datetime).total_seconds() / 60.0 )
lowerCAmelCase__ = start
lowerCAmelCase__ = end
lowerCAmelCase__ = duration_in_min
return job_info
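# Illustrative shape of the per-job mapping returned above (values made up):
#     {"started_at": "2023-01-01T00:00:00Z",
#      "completed_at": "2023-01-01T00:12:00Z",
#      "duration": 12}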
def _A ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=None ):
"""simple docstring"""
lowerCAmelCase__ = None
if token is not None:
lowerCAmelCase__ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'Bearer {token}'}
lowerCAmelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
lowerCAmelCase__ = requests.get(a_ , headers=a_ ).json()
lowerCAmelCase__ = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} )
lowerCAmelCase__ = math.ceil((result["total_count"] - 100) / 100 )
for i in range(a_ ):
lowerCAmelCase__ = requests.get(url + F'&page={i + 2}' , headers=a_ ).json()
job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
UpperCamelCase = parser.parse_args()
UpperCamelCase = get_job_time(args.workflow_run_id)
UpperCamelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v['duration']}""")
| 705 |
from __future__ import annotations
def _A ( lowerCAmelCase_ : list[int | str] ):
"""simple docstring"""
create_state_space_tree(lowerCAmelCase_ , [] , 0 , [0 for i in range(len(lowerCAmelCase_ ) )] )
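# Illustrative behaviour: generate_all_permutations([1, 2]) prints
#     [1, 2]
#     [2, 1]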
def _A ( lowerCAmelCase_ : list[int | str] , lowerCAmelCase_ : list[int | str] , lowerCAmelCase_ : int , lowerCAmelCase_ : list[int] , ):
"""simple docstring"""
if index == len(lowerCAmelCase_ ):
print(lowerCAmelCase_ )
return
for i in range(len(lowerCAmelCase_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
            index_used[i] = True
create_state_space_tree(lowerCAmelCase_ , lowerCAmelCase_ , index + 1 , lowerCAmelCase_ )
current_sequence.pop()
            index_used[i] = False
UpperCamelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
UpperCamelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 125 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : List[Any] = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'xlm-roberta'
def __init__( self : List[Any] , lowerCamelCase : str=3_05_22 , lowerCamelCase : int=7_68 , lowerCamelCase : Tuple=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=30_72 , lowerCamelCase : Any="gelu" , lowerCamelCase : List[str]=0.1 , lowerCamelCase : Tuple=0.1 , lowerCamelCase : Dict=5_12 , lowerCamelCase : List[str]=2 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1E-12 , lowerCamelCase : List[Any]=1 , lowerCamelCase : List[Any]=0 , lowerCamelCase : List[str]=2 , lowerCamelCase : Any="absolute" , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Dict=None , **lowerCamelCase : List[Any] , ) -> List[Any]:
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : Optional[int] = vocab_size
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : Optional[int] = num_hidden_layers
lowerCAmelCase_ : Optional[Any] = num_attention_heads
lowerCAmelCase_ : Optional[Any] = hidden_act
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : Any = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Any = max_position_embeddings
lowerCAmelCase_ : List[str] = type_vocab_size
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : List[Any] = layer_norm_eps
lowerCAmelCase_ : List[str] = position_embedding_type
lowerCAmelCase_ : Optional[Any] = use_cache
lowerCAmelCase_ : Tuple = classifier_dropout
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
@property
def __lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 275 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 275 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
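
# Illustrative usage sketch (an assumption-laden example, not from the original
# file): a typical round trip with a pretrained checkpoint. Assumes
# `transformers` and `sentencepiece` are installed and the checkpoint name is
# available on the Hub.
#
#   from transformers import AlbertTokenizer
#   tok = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tok("This is a test").input_ids
#   tok.decode(ids)  # "[CLS] this is a test[SEP]" (lowercased by default)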
| 548 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
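
# Note (an illustrative addition, not from the original file): swapping the
# module object in sys.modules for a _LazyModule defers the heavy submodule
# imports until an attribute is first accessed, e.g.
#
#   from transformers.models.pix2struct import Pix2StructConfig  # triggers the real import
#
# while a bare `import transformers.models.pix2struct` stays cheap.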
| 548 | 1 |
'''simple docstring'''

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')

    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )

    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    """simple docstring"""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
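
# Illustrative usage sketch (an assumption-laden example, not from the original
# file): the launcher is invoked from the command line; the script name and
# trailing arguments below are hypothetical. The launched script must expose an
# `_mp_fn(index)` entry point for xmp.spawn to call in each TPU process.
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased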
| 186 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
| 186 | 1 |
def ugly_numbers(n: int) -> int:
    '''simple docstring'''
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
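    # Worked example (an illustrative addition, not from the original file):
    # the first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the
    # 10th is 12.
    assert ugly_numbers(10) == 12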
| 250 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES['spm_file'])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'], )
@slow
def lowercase__ ( self : List[str] ):
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = {'''input_ids''': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'

    french_text = "C'est trop cool"
    spanish_text = 'Esto es genial'
@classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['it'], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['de'], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 250 | 1 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    # also known as SiLU, the sigmoid linear unit: x * sigmoid(x)
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
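    # Usage sketch (an illustrative addition, not from the original file):
    # both functions are vectorised over numpy arrays.
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # ~[0.2689, 0.5, 0.7311]
    print(swish(np.array([-1.0, 0.0, 1.0])))    # ~[-0.2689, 0.0, 0.7311]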
| 23 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder')}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder')}

    for k, v in tqdm(decoder_weights.items(), 'tf -> hf conversion'):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'

    for k, v in tqdm(remaining_weights.items(), 'tf -> hf conversion'):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'

    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight')
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars, desc='converting tf checkpoint to dict'):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
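
# Usage sketch (an illustrative addition, not from the original file): run as
# a script; the script name and both paths are hypothetical.
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_ckpt \
#       --save_dir ./bigbird-pegasus-hf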
| 608 | 0 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars

        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform):
        """simple docstring"""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        """simple docstring"""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None):
        """simple docstring"""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
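
# Usage sketch (an illustrative addition, not from the original file):
# featurizing one second of random 16 kHz audio as a stand-in for a real
# waveform. Assumes `torchaudio` is installed for the Kaldi fbank computation,
# and the output shape is approximate.
#
#   import numpy as np
#   extractor = Speech2TextFeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)
#   feats = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#   feats["input_features"].shape  # roughly (1, num_frames, 80)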
| 709 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        """simple docstring"""
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')

        self.post_init()

    def post_init(self):
        """simple docstring"""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('llm_int8_threshold must be a float')

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('llm_int8_skip_modules must be a list of strings')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('bnb_4bit_quant_type must be a string')

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')

        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0'):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')

    def is_quantizable(self):
        """simple docstring"""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """simple docstring"""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        """simple docstring"""
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        """simple docstring"""
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'

            writer.write(json_string)

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype']).split('.')[1]

        return output

    def __repr__(self):
        """simple docstring"""
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff=True):
        """simple docstring"""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        """simple docstring"""
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
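
# Usage sketch (an illustrative addition, not from the original file): a
# common 4-bit NF4 setup built with the class above. Assumes
# `bitsandbytes>=0.39.0` is installed.
#
#   import torch
#   bnb_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype=torch.bfloat16,
#       bnb_4bit_use_double_quant=True,
#   )
#   bnb_config.quantization_method()  # "nf4"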
| 353 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = 'datasets/huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['target_encoder']

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors='pt')

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
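
# Usage sketch (an illustrative addition, not from the original file): run as
# a script; the script name and output folder are hypothetical.
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small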
| 410 |
import math


def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
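
# Worked example (an illustrative addition, not from the original file):
# the primes up to 30.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]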
| 410 | 1 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE = 100 ) -> int:
snake_case_ = set()
snake_case_ = 0
snake_case_ = n + 1 # maximum limit
for a in range(2 , _SCREAMING_SNAKE_CASE ):
for b in range(2 , _SCREAMING_SNAKE_CASE ):
snake_case_ = a**b # calculates the current power
collect_powers.add(_SCREAMING_SNAKE_CASE ) # adds the result to the set
return len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
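    # Worked example (an illustrative addition, not from the original file):
    # for n = 5 there are 15 distinct terms a**b with 2 <= a, b <= 5, since
    # only 2**4 == 4**2 coincide.
    assert solution(5) == 15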
| 2 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    '''simple docstring'''

    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """simple docstring"""
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
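
# Usage sketch (an illustrative addition, not from the original file):
# remapping dataset columns onto the template's canonical names; the column
# names below are hypothetical.
#
#   task = AutomaticSpeechRecognition(audio_column="audio_filepath", transcription_column="text")
#   task.column_mapping  # {'audio_filepath': 'audio', 'text': 'transcription'}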
| 2 | 1 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_UpperCamelCase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching, '''os.path.join''', __snake_case ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
assert _test_patching.open is open
_UpperCamelCase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, '''open''', __snake_case ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching, '''pandas.read_csv''', __snake_case ):
pass
def test_patch_submodule_missing_builtin() -> None:
    """simple docstring"""
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, '''len''', None) is None
    with patch_submodule(_test_patching, '''len''', mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop() -> None:
    """simple docstring"""
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching, '''open''', mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive() -> None:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, '''os.path.join''', mock_join):
        with patch_submodule(_test_patching, '''os.rename''', mock_rename):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, '''os.rename''', mock_rename):
        with patch_submodule(_test_patching, '''os.path.join''', mock_join):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist() -> None:
    """simple docstring"""
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', mock):
        pass
    with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', mock):
        pass
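# A minimal usage sketch of `patch_submodule` outside of tests (hedged: the target
# module `my_module` and the replacement `fake_join` are hypothetical; only the
# context-manager / start-stop API exercised by the tests above is assumed):
#
#   from datasets.utils.patching import patch_submodule
#   import my_module
#
#   def fake_join(*parts):
#       return "/".join(parts)
#
#   with patch_submodule(my_module, "os.path.join", fake_join):
#       my_module.os.path.join("a", "b")  # -> "a/b", even through renamed imports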
| 19 | import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
    """simple docstring"""
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
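    # Worked example of the accumulation arithmetic above (illustrative only):
    # with config["batch_size"] = 64 and MAX_GPU_BATCH_SIZE = 16 on a non-TPU setup,
    # gradient_accumulation_steps = 64 // 16 = 4 and batch_size becomes 16, so the
    # optimizer still steps on an effective batch of 4 * 16 = 64 samples.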
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['''labels''']))
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 534 | 0 |
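# This script follows the upstream accelerate examples, which are started either
# directly (python this_script.py) or through the CLI, e.g. `accelerate launch
# this_script.py` (hedged: "this_script.py" is a placeholder for whatever name
# this file is saved under; the exact launch flags depend on `accelerate config`).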
def partition(m):
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
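# Worked example (traced by hand): partition(5) == 7, matching the seven integer
# partitions of 5: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.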
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 709 |
def print_pascal_triangle(num_rows):
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def generate_pascal_triangle(num_rows):
    if not isinstance(num_rows, int):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
return triangle
def populate_current_row(triangle, current_row_idx):
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx )
    return current_row
def calculate_current_element(triangle, current_row, current_row_idx, current_col_idx, ):
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows):
    if not isinstance(num_rows, int):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)
return result
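# Example output of the optimized generator (traced by hand):
#   generate_pascal_triangle_optimized(4) -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
# Only the first half of each row is summed; the second half is a mirror image.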
def benchmark() -> None:
from collections.abc import Callable
from timeit import timeit
    def benchmark_a_function(func, value) -> None:
        call = F"{func.__name__}({value})"
        timing = timeit(F"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F"{call:38} -- {timing:.4f} seconds")
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 268 | 0 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """simple docstring"""
    factor_map = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
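# Sketch of the incremental sieve in use (the first values yielded are the
# primes 2, 3, 5, 7, 11, ...):
#
#   primes = sieve()
#   first_five = [next(primes) for _ in range(5)]  # [2, 3, 5, 7, 11]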
def solution(limit: float = 1E10) -> int:
    """simple docstring"""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 410 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_transfo_xl'] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_transfo_xl'] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 603 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self) -> Tuple:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False) -> List[str]:
        '''simple docstring'''
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self) -> None:
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self) -> Union[str, Any]:
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self) -> Union[str, Any]:
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """feat_extract.json""")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self) -> str:
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="""max_length""", return_tensors="""np""").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="""np""").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="""np""").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="""np""").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="""np""").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="""np""").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="""np""").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="""np""").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="""np""").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    def test_double_precision_pad(self) -> Optional[int]:
        '''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}], return_tensors="""np""")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}], return_tensors="""pt""")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples) -> Dict:
        '''simple docstring'''
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""", """clean""", split="""validation""")
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""").select(range(num_samples))[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration(self) -> Union[str, Any]:
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="""pt""").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1E-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self) -> List[str]:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1E-3))
| 21 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""
    model_type = '''swin'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class SwinOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self) -> float:
'''simple docstring'''
return 1E-4
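# A minimal instantiation sketch (assumes only the defaults defined above):
#
#   config = SwinConfig()
#   config.num_layers   # 4, from depths=[2, 2, 6, 2]
#   config.hidden_size  # 768 = 96 * 2 ** 3, the channel dim after the last stage
#   config.stage_names  # ["stem", "stage1", "stage2", "stage3", "stage4"]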
| 21 | 1 |
from __future__ import annotations
def comp_and_swap(array, index1, index2, direction) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array, low, length, direction) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array, low, length, direction) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
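# Note: a bitonic sorting network assumes the input length is a power of two,
# so the demo above only behaves correctly for such inputs. A quick sketch:
#
#   data = [12, 42, -21, 17]          # length 4 = 2 ** 2
#   bitonic_sort(data, 0, len(data), 1)
#   # data is now [-21, 12, 17, 42]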
| 33 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    """simple docstring"""
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    """simple docstring"""
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f'''{USER}/test-dynamic-tokenizer''', use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    """simple docstring"""
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})
    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])
    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])
    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])
    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])
    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
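# A minimal sketch of how a tokenizer uses this Trie for special tokens (hedged:
# the token strings are illustrative; only the add/split API exercised by the
# tests above is assumed):
#
#   trie = Trie()
#   trie.add("[CLS]")
#   trie.add("[SEP]")
#   trie.split("[CLS] hello [SEP]")  # -> ["[CLS]", " hello ", "[SEP]"]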
| 297 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    '''simple docstring'''
    sd = torch.load(checkpoint_path, map_location='''cpu''')
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    '''simple docstring'''
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
@torch.no_grad()
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : List[str] ) -> List[Any]:
'''simple docstring'''
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
lowercase ='''pretraining'''
if "vcr" in checkpoint_path:
lowercase ={'''visual_embedding_dim''': 5_1_2}
elif "vqa_advanced" in checkpoint_path:
lowercase ={'''visual_embedding_dim''': 2_0_4_8}
elif "vqa" in checkpoint_path:
lowercase ={'''visual_embedding_dim''': 2_0_4_8}
elif "nlvr" in checkpoint_path:
lowercase ={'''visual_embedding_dim''': 1_0_2_4}
else:
raise NotImplementedError(f'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
lowercase ={'''visual_embedding_dim''': 5_1_2}
lowercase ='''multichoice'''
elif "vqa_advanced" in checkpoint_path:
lowercase ={'''visual_embedding_dim''': 2_0_4_8}
lowercase ='''vqa_advanced'''
elif "vqa" in checkpoint_path:
lowercase ={'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9}
lowercase ='''vqa'''
elif "nlvr" in checkpoint_path:
lowercase ={
'''visual_embedding_dim''': 1_0_2_4,
'''num_labels''': 2,
}
lowercase ='''nlvr'''
lowercase =VisualBertConfig(**lowercase_ )
# Load State Dict
lowercase =load_state_dict(lowercase_ )
lowercase =get_new_dict(lowercase_ , lowercase_ )
if model_type == "pretraining":
lowercase =VisualBertForPreTraining(lowercase_ )
elif model_type == "vqa":
lowercase =VisualBertForQuestionAnswering(lowercase_ )
elif model_type == "nlvr":
lowercase =VisualBertForVisualReasoning(lowercase_ )
elif model_type == "multichoice":
lowercase =VisualBertForMultipleChoice(lowercase_ )
model.load_state_dict(lowercase_ )
# Save Checkpoints
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
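# Example invocation (hedged: the script filename is whatever this file is saved
# as, and the checkpoint must be one of ACCEPTABLE_CHECKPOINTS):
#
#   python convert_visual_bert_checkpoint.py nlvr2_pre_trained.th ./visualbert_nlvr2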
| 145 |
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    '''simple docstring'''
    img_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('''RGB''')
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ] )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''', '''vision_model.encoder''', key)
    if "blocks" in key:
        key = re.sub(R'''blocks''', '''layers''', key)
    if "attn" in key:
        key = re.sub(R'''attn''', '''self_attn''', key)
    if "norm1" in key:
        key = re.sub(R'''norm1''', '''layer_norm1''', key)
    if "norm2" in key:
        key = re.sub(R'''norm2''', '''layer_norm2''', key)
    if "encoder.norm" in key:
        key = re.sub(R'''encoder.norm''', '''post_layernorm''', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R'''encoder.patch_embed.proj''', '''embeddings.patch_embedding''', key)
    if "encoder.pos_embed" in key:
        key = re.sub(R'''encoder.pos_embed''', '''embeddings.position_embedding''', key)
    if "encoder.cls_token" in key:
        key = re.sub(R'''encoder.cls_token''', '''embeddings.class_embedding''', key)
    if "self_attn" in key:
        key = re.sub(R'''self_attn.proj''', '''self_attn.projection''', key)
    return key
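# Example key translation, traced through the substitutions above:
#   rename_key("visual_encoder.blocks.0.attn.proj.weight")
#   -> "vision_model.encoder.layers.0.self_attn.projection.weight"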
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    '''simple docstring'''
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='''base''')
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device='''cpu''')
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')
    input_ids = tokenizer(['''a picture of''']).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='''base''')
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ['''How many dogs are in this image?''']
    question_input_ids = tokenizer(question, return_tensors='''pt''').input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''')
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='''base''')
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ['''A picture of a woman with a dog sitting in a beach''']
    question_input_ids = tokenizer(
        question, return_tensors='''pt''', padding='''max_length''', truncation=True, max_length=35, ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 145 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 297 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """simple docstring"""
    root_marker = ""
    protocol = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression = None # compression type in fsspec. ex: "gzip"
    extension = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__(self, fo="", target_protocol=None, target_options=None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo, mode="rb", protocol=target_protocol, compression=self.compression, client_kwargs={
                "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")
    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}
    def cat(self, path):
        return self.file.open().read()
    def _open(self, path, mode="rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(self, fo, mode="rb", target_protocol=None, target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs, ):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            """simple docstring"""
            def __init__(self, file_):
                self._file = file_
            def __enter__(self):
                self._file.__enter__()
                return self
            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)
            def __iter__(self):
                return iter(self._file)
            def __next__(self):
                return next(self._file)
            def __getattr__(self, attr):
                return getattr(self._file, attr)
        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))
        self.file.__enter__ = fixed_enter
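# A minimal usage sketch (hedged: "data.txt.gz" is a hypothetical local file;
# only the `fo=` constructor argument and the `cat()` method defined above are
# assumed):
#
#   fs = GzipFileSystem(fo="data.txt.gz")
#   raw_bytes = fs.cat("data.txt")  # decompressed contents of the archive member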
| 297 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
def get_mobilevit_config( mobilevit_name : str ):
    '''simple docstring'''
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [1_44, 1_92, 2_40]
        config.neck_hidden_sizes = [16, 32, 64, 96, 1_28, 1_60, 6_40]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 1_20, 1_44]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 3_84]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 3_20]
        config.hidden_dropout_prob = 0.0_5
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("""deeplabv3_""" ):
        config.image_size = 5_12
        config.output_stride = 16
        config.num_labels = 21
        filename = """pascal-voc-id2label.json"""
    else:
        config.num_labels = 10_00
        filename = """imagenet-1k-id2label.json"""

    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def rename_key( name : str , base_model : bool=False ):
    '''simple docstring'''
    for i in range(1 , 6 ):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}." , f"encoder.layer.{i - 1}." )

    if "conv_1." in name:
        name = name.replace("""conv_1.""" , """conv_stem.""" )
    if ".block." in name:
        name = name.replace(""".block.""" , """.""" )
    if "exp_1x1" in name:
        name = name.replace("""exp_1x1""" , """expand_1x1""" )
    if "red_1x1" in name:
        name = name.replace("""red_1x1""" , """reduce_1x1""" )
    if ".local_rep.conv_3x3." in name:
        name = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
    if ".local_rep.conv_1x1." in name:
        name = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
    if ".norm." in name:
        name = name.replace(""".norm.""" , """.normalization.""" )
    if ".conv." in name:
        name = name.replace(""".conv.""" , """.convolution.""" )
    if ".conv_proj." in name:
        name = name.replace(""".conv_proj.""" , """.conv_projection.""" )

    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}." , f".{i}.layer.{j}." )

    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}." , f".{i}." )
                if "expand_1x1" in name:
                    name = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
                if "conv_3x3" in name:
                    name = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
                if "reduce_1x1" in name:
                    name = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )

    for i in range(2 , 5 ):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight" , """.layernorm.weight""" )
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias" , """.layernorm.bias""" )

    if ".global_rep." in name:
        name = name.replace(""".global_rep.""" , """.transformer.""" )
    if ".pre_norm_mha.0." in name:
        name = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
    if ".pre_norm_ffn.0." in name:
        name = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
    if ".pre_norm_ffn.1." in name:
        name = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
    if ".pre_norm_ffn.4." in name:
        name = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
    if ".transformer." in name:
        name = name.replace(""".transformer.""" , """.transformer.layer.""" )

    if ".aspp_layer." in name:
        name = name.replace(""".aspp_layer.""" , """.""" )
    if ".aspp_pool." in name:
        name = name.replace(""".aspp_pool.""" , """.""" )
    if "seg_head." in name:
        name = name.replace("""seg_head.""" , """segmentation_head.""" )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )

    if "classifier.fc." in name:
        name = name.replace("""classifier.fc.""" , """classifier.""" )
    elif (not base_model) and ("segmentation_head." not in name):
        name = """mobilevit.""" + name

    return name
def convert_state_dict( orig_state_dict : dict , model , base_model : bool=False ):
    '''simple docstring'''
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val

    return orig_state_dict
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( mobilevit_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool=False ):
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name )

    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )

    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()

    new_state_dict = convert_state_dict(checkpoint , model )
    model.load_state_dict(new_state_dict )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]],
[[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]],
[[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]],
[[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]],
[[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.8_6_2_4, -9.5_9_6_4], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]],
] )
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1e-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] )
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] )
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] )
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        model_mapping = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="""apple""" )
        model.push_to_hub(model_name , organization="""apple""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
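# Hypothetical invocation of this conversion script (the script name and paths
# are placeholders, not taken from the original source):
#
#   python convert_mobilevit.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small
#
# The dump folder can then be reloaded with
# MobileViTForImageClassification.from_pretrained("./mobilevit-small").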
| 710 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : Any , bert_config_file : List[str] , pytorch_dump_path : List[str] ):
    '''simple docstring'''
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
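# Hypothetical invocation (the script name and file paths are placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin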
| 149 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
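# A short sketch of what the lazy-import pattern above buys (the module path is
# assumed): importing the package stays cheap because _LazyModule defers the
# heavy, torch-backed submodule imports until an attribute is first accessed.
#
#   import transformers.models.table_transformer as tt  # no torch import yet
#   cfg_cls = tt.TableTransformerConfig                  # real import happens here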
| 345 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate( item : str , main_target : str ) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))


def crossover( parent_a : str , parent_b : str ) -> tuple[str, str]:
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)


def mutate( child : str , genes : list[str] ) -> str:
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select( parent_a : tuple[str, float] , population_score : list[tuple[str, float]] , genes : list[str] , ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a, child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic( target : str , genes : list[str] , debug : bool = True ) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg )

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''.join([random.choice(genes ) for i in range(len(target ) )] ) )

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}' )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
lowerCAmelCase__: Optional[Any] = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCAmelCase__: List[Any] = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__: Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
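# A small, hedged sanity check for the helpers above (illustrative only, not
# part of the original algorithm):
#
#   assert evaluate("abc", "abd")[1] == 2.0       # two of three positions match
#   child_a, child_b = crossover("aaaa", "bbbb")
#   assert len(child_a) == len(child_b) == 4      # crossover preserves length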
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 716 |
import os
from collections.abc import Iterator
def good_file_paths( top_dir : str = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('''./''' )


def md_prefix( i : int ) -> str:
    return f'''{i * '  '}*''' if i else "\n##"


def print_path( old_path : str , new_path : str ) -> str:
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i )} {new_part.replace('_' , ' ' ).title()}''' )
    return new_path


def print_directory_md( top_dir : str = "." ) -> None:
    old_path = ''''''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f'''{filepath}/{filename}'''.replace(''' ''' , '''%20''' )
        filename = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
        print(f'''{md_prefix(indent )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('.')
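# Illustrative output shape: for a file "ciphers/caesar_cipher.py" the script
# first prints a "## Ciphers" heading via print_path, then the entry
# "  * [Caesar Cipher](ciphers/caesar_cipher.py)" one level deep, building a
# DIRECTORY.md-style index of the repository.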
| 186 | 0 |
def max_product_subarray( numbers ):
    """simple docstring"""
    if not numbers:
        return 0

    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("""numbers must be an iterable of integers""" )

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )

        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )

    return max_prod
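# Worked examples for max_product_subarray (classic maximum-product cases):
#
#   max_product_subarray([2, 3, -2, 4]) == 6    # best subarray is [2, 3]
#   max_product_subarray([-2, 0, -1]) == 0      # the zero splits the negatives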
| 324 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class lowercase( PretrainedConfig ):
    '''simple docstring'''
    model_type = "open-llama"

    def __init__( self , vocab_size=100_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            """use_memorry_efficient_attention""" , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("""type""" , None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 609 | 0 |
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any( num , base ):
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("int() can't convert non-string with explicit base" )
    if num < 0:
        raise ValueError("parameter must be positive int" )

    if isinstance(base , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )

    if isinstance(base , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )

    if base in (0, 1):
        raise ValueError("base must be >= 2" )
    if base > 3_6:
        raise ValueError("base must be <= 36" )

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num , base )
        if base >= 1_1 and 9 < mod < 3_6:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(num )
            return str(new_value[::-1] )

    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
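# Quick worked conversions with the function above:
#
#   decimal_to_any(255, 16) == "FF"    # 255 = 15 * 16 + 15, both digits map to "F"
#   decimal_to_any(7, 2) == "111"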
| 568 |
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 20_48
MAX_LENGTH = 40_96
SEED = 42
PROCESS_TRAIN = os.environ.pop("""PROCESS_TRAIN""", """false""")
CATEGORY_MAPPING = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
def _get_single_answer( example ):
"""simple docstring"""
    def choose_first(answer , is_long_answer=False ):
        assert isinstance(answer , list )
        if len(answer ) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"] ) > 0:
                break
        return a
lowerCAmelCase__ : Dict = {"id": example["id"]}
lowerCAmelCase__ : Optional[int] = example["annotations"]
lowerCAmelCase__ : Dict = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
lowerCAmelCase__ : str = ["yes"] if 1 in yes_no_answer else ["no"]
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : List[str] = ["<cls>"]
else:
lowerCAmelCase__ : Optional[Any] = ["short"]
lowerCAmelCase__ : Tuple = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
lowerCAmelCase__ : str = ["long"]
lowerCAmelCase__ : Tuple = choose_first(annotation["long_answer"] , is_long_answer=lowerCamelCase_ )
lowerCAmelCase__ : Any = []
answer.update(lowerCamelCase_ )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
lowerCAmelCase__ : Any = True
else:
lowerCAmelCase__ : Dict = False
lowerCAmelCase__ : Tuple = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , lowerCamelCase_ ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def get_context_and_ans( example , assertion=False ):
    """simple docstring"""
    answer = _get_single_answer(example )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCAmelCase__ : Dict = example["document"]["tokens"]
lowerCAmelCase__ : Union[str, Any] = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
return {
"context": " ".join(lowerCamelCase_ ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowerCAmelCase__ : List[Any] = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
lowerCAmelCase__ : List[str] = example["document"]["tokens"]
lowerCAmelCase__ : Union[str, Any] = answer["start_token"]
lowerCAmelCase__ : str = answer["end_token"]
lowerCAmelCase__ : str = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowerCAmelCase__ : str = " ".join(context[start_token:end_token] )
# checking above code
if assertion:
lowerCAmelCase__ : str = doc["is_html"][answer["start_token"] : answer["end_token"]]
lowerCAmelCase__ : str = doc["token"][answer["start_token"] : answer["end_token"]]
lowerCAmelCase__ : Optional[int] = " ".join([old[i] for i in range(len(lowerCamelCase_ ) ) if not is_html[i]] )
if new != old:
print("ID:" , example["id"] )
print("New:" , lowerCamelCase_ , end="\n" )
print("Old:" , lowerCamelCase_ , end="\n\n" )
return {
"context": " ".join(lowerCamelCase_ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans( example , tokenizer , doc_stride=2_0_4_8 , max_length=4_0_9_6 , assertion=True ):
    """simple docstring"""
    out = get_context_and_ans(example , assertion=assertion )
    answer = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowerCAmelCase__ : int = tokenizer(example["question"]["text"] , out["context"] ).input_ids
lowerCAmelCase__ : List[str] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[Any] = input_ids[:q_len]
lowerCAmelCase__ : List[str] = range(lowerCamelCase_ , len(lowerCamelCase_ ) , max_length - doc_stride )
for i in doc_start_indices:
lowerCAmelCase__ : int = i + max_length - q_len
lowerCAmelCase__ : List[Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(lowerCamelCase_ ),
"end_token": [-1_0_0] * len(lowerCamelCase_ ),
"category": category,
},
}
lowerCAmelCase__ : Optional[Any] = out["context"].split()
lowerCAmelCase__ : Dict = splitted_context[answer["end_token"]]
lowerCAmelCase__ : Union[str, Any] = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=lowerCamelCase_ , ).input_ids )
lowerCAmelCase__ : List[str] = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=lowerCamelCase_ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowerCAmelCase__ : Optional[Any] = len(tokenizer(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowerCAmelCase__ : str = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
lowerCAmelCase__ : str = answer["start_token"]
lowerCAmelCase__ : Union[str, Any] = answer["end_token"]
if assertion:
lowerCAmelCase__ : Union[str, Any] = tokenizer.decode(lowerCamelCase_ )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:" , answer["span"] )
print("NEW:" , lowerCamelCase_ , end="\n\n" )
if len(lowerCamelCase_ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowerCAmelCase__ : int = input_ids[:q_len]
lowerCAmelCase__ : List[str] = range(lowerCamelCase_ , len(lowerCamelCase_ ) , max_length - doc_stride )
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : Optional[int] = [] # null, yes, no, long, short
for i in doc_start_indices:
lowerCAmelCase__ : Optional[Any] = i + max_length - q_len
lowerCAmelCase__ : List[Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowerCAmelCase__ : Any = start_token - i + q_len
lowerCAmelCase__ : Optional[Any] = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
lowerCAmelCase__ : Union[str, Any] = -1_0_0
lowerCAmelCase__ : Optional[Any] = -1_0_0
answers_category.append("null" )
lowerCAmelCase__ : Any = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase_ )
answers_end_token.append(lowerCamelCase_ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"] )
print("New:" , tokenizer.decode(lowerCamelCase_ ) )
print("Old:" , tokenizer.decode(lowerCamelCase_ ) , end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs( example , tokenizer , doc_stride=2_0_4_8 , max_length=4_0_9_6 , assertion=False ):
    """simple docstring"""
    example = get_strided_contexts_and_ans(
        example , tokenizer , doc_stride=doc_stride , max_length=max_length , assertion=assertion , )
    return example
def save_to_disk( hf_data , file_name ):
    """simple docstring"""
    with jsonlines.open(file_name , "a" ) as writer:
        for example in tqdm(hf_data , total=len(hf_data ) , desc="Saving samples ... " ):
            labels = example["labels"]
for ids, start, end, cat in zip(
example["input_ids"] , labels["start_token"] , labels["end_token"] , labels["category"] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"input_ids": ids,
"start_token": start,
"end_token": end,
"category": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("""natural_questions""")
    tokenizer = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")

    data = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]

    fn_kwargs = {
        """tokenizer""": tokenizer,
        """doc_stride""": DOC_STRIDE,
        """max_length""": MAX_LENGTH,
        """assertion""": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
    print(data)

    np.random.seed(SEED)
    cache_file_name = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
    save_to_disk(data, file_name=cache_file_name)
| 568 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn( t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn( t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class UnCLIPScheduler( SchedulerMixin , ConfigMixin ):
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , variance_type = "fixed_small_log" , clip_sample = True , clip_sample_range = 1.0 , prediction_type = "epsilon" , beta_schedule = "squaredcos_cap_v2" , ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )

        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )

        self.variance_type = variance_type
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
return sample
    def set_timesteps( self , num_inference_steps , device = None ):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self , t , predicted_variance=None , prev_timestep=None , variance_type=None ):
if prev_timestep is None:
UpperCAmelCase__ : Union[str, Any] = t - 1
UpperCAmelCase__ : Optional[int] = self.alphas_cumprod[t]
UpperCAmelCase__ : Optional[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : int = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase__ : Union[str, Any] = self.betas[t]
else:
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Union[str, Any] = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCAmelCase__ : int = torch.log(torch.clamp(_lowerCAmelCase , min=1e-20 ) )
UpperCAmelCase__ : List[Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCAmelCase__ : Tuple = variance.log()
UpperCAmelCase__ : str = beta.log()
UpperCAmelCase__ : int = (predicted_variance + 1) / 2
UpperCAmelCase__ : Tuple = frac * max_log + (1 - frac) * min_log
return variance
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase=None , _lowerCAmelCase = True , ):
UpperCAmelCase__ : Any = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCAmelCase__ : Optional[Any] = torch.split(_lowerCAmelCase , sample.shape[1] , dim=1 )
else:
UpperCAmelCase__ : Tuple = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCAmelCase__ : Tuple = t - 1
UpperCAmelCase__ : Union[str, Any] = self.alphas_cumprod[t]
UpperCAmelCase__ : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase__ : Optional[int] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase__ : List[Any] = self.betas[t]
UpperCAmelCase__ : List[Any] = self.alphas[t]
else:
UpperCAmelCase__ : int = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCAmelCase__ : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Optional[Any] = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
""" for the UnCLIPScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : Tuple = torch.clamp(
_lowerCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCAmelCase__ : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCAmelCase__ : List[str] = 0
if t > 0:
UpperCAmelCase__ : Dict = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=_lowerCAmelCase , device=model_output.device )
UpperCAmelCase__ : Union[str, Any] = self._get_variance(
_lowerCAmelCase , predicted_variance=_lowerCAmelCase , prev_timestep=_lowerCAmelCase , )
if self.variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = variance
elif self.variance_type == "learned_range":
UpperCAmelCase__ : int = (0.5 * variance).exp()
else:
raise ValueError(
f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
""" for the UnCLIPScheduler.""" )
UpperCAmelCase__ : List[Any] = variance * variance_noise
UpperCAmelCase__ : int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
UpperCAmelCase__ : str = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCAmelCase__ : Tuple = timesteps.to(original_samples.device )
UpperCAmelCase__ : List[str] = alphas_cumprod[timesteps] ** 0.5
UpperCAmelCase__ : Any = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase__ : Union[str, Any] = sqrt_alpha_prod.unsqueeze(-1 )
UpperCAmelCase__ : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCAmelCase__ : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase__ : Optional[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCAmelCase__ : List[str] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
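# A hedged sketch of the standard diffusers sampling loop this scheduler is
# built for (the denoising `model` is hypothetical and shapes are illustrative):
#
#   scheduler.set_timesteps(25)
#   for t in scheduler.timesteps:
#       noise_pred = model(sample, t)   # predicts epsilon for the current step
#       sample = scheduler.step(noise_pred, t, sample).prev_sample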
| 79 |
'''simple docstring'''
def solution( n : int = 100 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar('T')
class DisjointSetTreeNode( Generic[T] ):
    def __init__( self : List[str] , data : T ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree( Generic[T] ):
    def __init__( self : List[Any] ) -> None:
        # map from node name to the node object
        self.map : dict[T, DisjointSetTreeNode[T]] = {}

    def make_set( self : str , data : T ) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )

    def find_set( self : Optional[int] , data : T ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent

    def link( self : List[str] , nodea : DisjointSetTreeNode[T] , nodeb : DisjointSetTreeNode[T] ) -> None:
        # helper function for union operation
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1

    def union( self : Any , dataa : T , datab : T ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(dataa ) , self.find_set(datab ) )


class GraphUndirectedWeighted( Generic[T] ):
    def __init__( self : Dict ) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections : dict[T, dict[T, int]] = {}

    def add_node( self : Tuple , node : T ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge( self : Any , nodea : T , nodeb : T , weight : int ) -> None:
        # add an edge with the given weight
        self.add_node(nodea )
        self.add_node(nodeb )
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight

    def kruskal( self : Optional[Any] ) -> GraphUndirectedWeighted[T]:
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
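# A small usage example for the classes above (node labels are arbitrary):
if __name__ == "__main__":
    demo_graph = GraphUndirectedWeighted[int]()
    demo_graph.add_edge(1, 2, 1)
    demo_graph.add_edge(2, 3, 2)
    demo_graph.add_edge(1, 3, 3)
    mst = demo_graph.kruskal()
    # the weight-3 edge is redundant, so the MST keeps only (1-2) and (2-3)
    assert 3 not in mst.connections[1]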
| 249 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 249 | 1 |