import os


def solution() -> int:
    """Project Euler problem 11: find the greatest product of four adjacent
    numbers (right, down, or diagonal) in the 20x20 grid stored in grid.txt
    next to this file."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
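# Quick sanity check of the anti-diagonal index arithmetic above (an
# illustrative 4x4 grid, not the 20x20 grid.txt that solution() expects):
# starting the column index at 3 keeps j - 3 from going negative.
#
#     g = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#     product = g[0][3] * g[1][2] * g[2][1] * g[3][0]  # 4 * 7 * 10 * 13
#     assert product == 3640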
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCamelCase__ : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCamelCase__ : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=None , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :Optional[int]="auto" , lowerCamelCase_ :Dict=-1 , lowerCamelCase_ :str=0.9 , lowerCamelCase_ :str=5 , lowerCamelCase_ :Tuple=5_00 , lowerCamelCase_ :str="gpt2-large" , lowerCamelCase_ :List[Any]=-1 , lowerCamelCase_ :Dict=10_24 , lowerCamelCase_ :Tuple=25 , lowerCamelCase_ :List[Any]=5 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=25 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = compute_mauve(
p_text=lowerCamelCase_ , q_text=lowerCamelCase_ , p_features=lowerCamelCase_ , q_features=lowerCamelCase_ , p_tokens=lowerCamelCase_ , q_tokens=lowerCamelCase_ , num_buckets=lowerCamelCase_ , pca_max_data=lowerCamelCase_ , kmeans_explained_var=lowerCamelCase_ , kmeans_num_redo=lowerCamelCase_ , kmeans_max_iter=lowerCamelCase_ , featurize_model_name=lowerCamelCase_ , device_id=lowerCamelCase_ , max_text_length=lowerCamelCase_ , divergence_curve_discretization_size=lowerCamelCase_ , mauve_scaling_factor=lowerCamelCase_ , verbose=lowerCamelCase_ , seed=lowerCamelCase_ , )
return out
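# Minimal usage sketch (requires the `mauve-text` package; the model name and
# the printed value are illustrative, not measured output):
#
#     import datasets
#     mauve = datasets.load_metric("mauve")
#     out = mauve.compute(predictions=["hello there"], references=["hello there"],
#                         featurize_model_name="gpt2", device_id=-1)
#     print(out.mauve)  # close to 1.0 when the two text distributions match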
"""Evaluation script for RAG models."""

import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, don't allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help="RAG model type: rag_sequence, rag_token or bart; if none specified, the type is inferred from the model_name_or_path",
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type"
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help="Evaluation mode: e2e calculates exact match and F1 of the downstream task, retrieval calculates precision@k.",
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument("--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples")
    parser.add_argument("--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples")
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file: "
            "qa - a single line in the following format: question [tab] answer_list; "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument("--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory")
    parser.add_argument("--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number")
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument("--recalculate", action="store_true", help="Recalculate predictions even if the prediction file exists")
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument("--print_predictions", action="store_true", help="If True, prints predictions while evaluating.")
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retrieved while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
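# Example invocation for end-to-end evaluation (the paths are placeholders,
# not files shipped with this script):
#
#     python eval_rag.py \
#         --model_name_or_path facebook/rag-token-nq \
#         --model_type rag_token \
#         --evaluation_set path/to/test.source \
#         --gold_data_path path/to/gold_data \
#         --predictions_path path/to/preds.txt \
#         --eval_mode e2e \
#         --gold_data_mode qa \
#         --n_docs 5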
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : str = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Optional[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowerCamelCase__ : int = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRContextEncoderTokenizer
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRQuestionEncoderTokenizer
lowerCamelCase__ : Union[str, Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowerCamelCase__ : int = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase )
class lowercase__:
'''simple docstring'''
def __call__( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = None , **lowerCamelCase_ :Tuple , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE : List[str] = titles if texts is None else texts
return super().__call__(
lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = titles if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [titles]
SCREAMING_SNAKE_CASE : Dict = texts if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [texts]
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = questions if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [questions] * n_passages
assert len(lowerCamelCase_ ) == len(
lowerCamelCase_ ), f"There should be as many titles than texts but got {len(lowerCamelCase_ )} titles and {len(lowerCamelCase_ )} texts."
SCREAMING_SNAKE_CASE : Any = super().__call__(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : Dict = super().__call__(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase_ , lowerCamelCase_ )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE : int = attention_mask
return self.pad(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :BatchEncoding , lowerCamelCase_ :DPRReaderOutput , lowerCamelCase_ :int = 16 , lowerCamelCase_ :int = 64 , lowerCamelCase_ :int = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = reader_output[:3]
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(range(lowerCamelCase_ ) , reverse=lowerCamelCase_ , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase_ , top_spans=lowerCamelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase_ , start_index=lowerCamelCase_ , end_index=lowerCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCamelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
for start_index, start_score in enumerate(lowerCamelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE : Dict = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] , reverse=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
SCREAMING_SNAKE_CASE : Optional[int] = end_index - start_index + 1
assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ["""input_ids""", """attention_mask"""]
UpperCamelCase = DPRReaderTokenizer
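# Usage sketch for the reader tokenizer, mirroring the example in the DPR
# model documentation (the predicted span depends on the checkpoint):
#
#     from transformers import DPRReader, DPRReaderTokenizerFast
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#     print(predicted_spans[0].text)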
"""Tokenization class for model T5."""

import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput

from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}


# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used
        # at the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
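# Usage sketch (requires `sentencepiece`; the tokens shown are illustrative of
# SentencePiece output rather than guaranteed vocabulary entries):
#
#     from transformers import T5Tokenizer
#     tokenizer = T5Tokenizer.from_pretrained("t5-small")
#     tokenizer.tokenize("Hello world")   # e.g. ['▁Hello', '▁world']
#     tokenizer.get_sentinel_tokens()[:2] # e.g. ['<extra_id_0>', '<extra_id_1>']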
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """markuplm"""
def __init__( self :int , lowerCamelCase_ :List[str]=3_05_22 , lowerCamelCase_ :Union[str, Any]=7_68 , lowerCamelCase_ :str=12 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :str=30_72 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Union[str, Any]=5_12 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Any=1E-12 , lowerCamelCase_ :Dict=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :str=2_56 , lowerCamelCase_ :List[Any]=10_24 , lowerCamelCase_ :Union[str, Any]=2_16 , lowerCamelCase_ :Dict=10_01 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :str=50 , lowerCamelCase_ :List[str]="absolute" , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :int=None , **lowerCamelCase_ :Dict , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : str = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE : Optional[Any] = max_depth
SCREAMING_SNAKE_CASE : Dict = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE : Tuple = tag_pad_id
SCREAMING_SNAKE_CASE : str = subs_pad_id
SCREAMING_SNAKE_CASE : List[Any] = xpath_unit_hidden_size
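# Usage sketch, following the standard transformers configuration pattern:
#
#     from transformers import MarkupLMConfig, MarkupLMModel
#     configuration = MarkupLMConfig()      # microsoft/markuplm-base style defaults
#     model = MarkupLMModel(configuration)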
"""T5 model configuration."""

from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
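# Usage sketch: `feed_forward_proj="gated-gelu"` is split into the gating flag
# and the activation name, and remapped to "gelu_new" for backwards
# compatibility, as the __init__ above shows:
#
#     from transformers import T5Config
#     config = T5Config(feed_forward_proj="gated-gelu")
#     assert config.is_gated_act and config.dense_act_fn == "gelu_new"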
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """resnet"""
UpperCamelCase = ["""basic""", """bottleneck"""]
def __init__( self :Optional[int] , lowerCamelCase_ :Tuple=3 , lowerCamelCase_ :Tuple=64 , lowerCamelCase_ :Union[str, Any]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase_ :int=[3, 4, 6, 3] , lowerCamelCase_ :Any="bottleneck" , lowerCamelCase_ :Optional[int]="relu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=None , lowerCamelCase_ :Optional[int]=None , **lowerCamelCase_ :Optional[int] , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : List[Any] = layer_type
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = downsample_in_first_stage
SCREAMING_SNAKE_CASE : int = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self :str ) -> float:
'''simple docstring'''
return 1E-3
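# Usage sketch: the defaults above correspond to a ResNet-50 style model; a
# ResNet-18 style configuration uses "basic" blocks with two blocks per stage:
#
#     from transformers import ResNetConfig, ResNetModel
#     config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512])
#     model = ResNetModel(config)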
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed
        # across python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
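# The small tests above deliberately use a tiny random CLIP checkpoint, so all
# candidate labels come back with near-identical ~0.333 scores; only the @slow
# tests exercise a real model. To run just the fast tests (the path assumes
# the standard transformers test layout):
#
#     python -m pytest -k "small_model" tests/pipelines/test_pipelines_zero_shot_image_classification.py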
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """mra"""
def __init__( self :int , lowerCamelCase_ :Optional[int]=5_02_65 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :int=1E-5 , lowerCamelCase_ :List[Any]="absolute" , lowerCamelCase_ :str=4 , lowerCamelCase_ :List[str]="full" , lowerCamelCase_ :List[Any]=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :List[Any]=2 , **lowerCamelCase_ :str , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = block_per_row
SCREAMING_SNAKE_CASE : Optional[int] = approx_mode
SCREAMING_SNAKE_CASE : List[Any] = initial_prior_first_n_blocks
SCREAMING_SNAKE_CASE : Union[str, Any] = initial_prior_diagonal_n_blocks
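if __name__ == "__main__":
    # A minimal usage sketch (assumes the transformers library is installed;
    # the class above is an obfuscated copy of transformers' MraConfig, so the
    # library class is used directly here rather than the local one).
    from transformers import MraConfig, MraModel

    config = MraConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4, intermediate_size=128)
    model = MraModel(config)  # randomly initialised model built from this config
    print(model.config.hidden_size)  # 64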
| 698 | 0 |
def simplify(current_set: list[list]) -> list[list]:
    """
    Divide each row by its leading coefficient, subtract rows to cancel the
    first variable, then recurse on the reduced system.
    """
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve a system of n linear equations given as n rows of n+1 coefficients,
    where the last entry of each row is the constant term.
    """
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
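    # Cross-check sketch (assumes numpy is installed): solving the same system
    # with numpy.linalg.solve should agree with solve_simultaneous() above, up
    # to the 5-decimal rounding the pure-Python solver applies.
    import numpy as np

    coefficients = np.array([row[:-1] for row in eq], dtype=float)  # left-hand sides
    constants = np.array([row[-1] for row in eq], dtype=float)      # right-hand sides
    print(np.linalg.solve(coefficients, constants))  # expected: [-1.  0.  1.  2.  3.]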
| 105 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """nllb-moe"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :List[str] , lowerCamelCase_ :Optional[int]=12_81_12 , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :Any=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :Union[str, Any]=0.0_5 , lowerCamelCase_ :Optional[int]=0.0_5 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :Tuple="relu" , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Any=False , lowerCamelCase_ :Optional[Any]="float32" , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[Any]=1_28 , lowerCamelCase_ :Any=64 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :Union[str, Any]=0.0_0_1 , lowerCamelCase_ :Optional[int]=0.0_0_1 , lowerCamelCase_ :List[str]="all" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=False , lowerCamelCase_ :Tuple=1.0 , lowerCamelCase_ :Union[str, Any]=0.2 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :int=2 , lowerCamelCase_ :List[str]=False , **lowerCamelCase_ :int , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : str = d_model
SCREAMING_SNAKE_CASE : Optional[int] = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Any = encoder_layers
SCREAMING_SNAKE_CASE : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : List[str] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Any = activation_function
SCREAMING_SNAKE_CASE : Tuple = init_std
SCREAMING_SNAKE_CASE : str = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE : int = router_z_loss_coef
SCREAMING_SNAKE_CASE : Any = router_aux_loss_coef
SCREAMING_SNAKE_CASE : str = decoder_sparse_step
SCREAMING_SNAKE_CASE : str = encoder_sparse_step
SCREAMING_SNAKE_CASE : List[str] = num_experts
SCREAMING_SNAKE_CASE : Union[str, Any] = expert_capacity
SCREAMING_SNAKE_CASE : Tuple = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = router_dtype
SCREAMING_SNAKE_CASE : Union[str, Any] = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : int = batch_prioritized_routing
SCREAMING_SNAKE_CASE : Optional[int] = second_expert_policy
SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE : Any = moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE : Optional[Any] = moe_token_dropout
SCREAMING_SNAKE_CASE : Tuple = output_router_logits
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
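if __name__ == "__main__":
    # A minimal usage sketch (assumes the transformers library; the class above
    # is an obfuscated copy of transformers' NllbMoeConfig, so the library
    # class is instantiated here).
    from transformers import NllbMoeConfig

    config = NllbMoeConfig(encoder_layers=2, decoder_layers=2, num_experts=4)
    print(config.model_type, config.num_experts)  # nllb-moe 4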
| 698 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : List[Any]=7 , lowerCamelCase : str=3 , lowerCamelCase : Dict=18 , lowerCamelCase : str=30 , lowerCamelCase : int=4_00 , lowerCamelCase : Dict=True , lowerCamelCase : Any=None , lowerCamelCase : Optional[int]=True , lowerCamelCase : List[str]=None , lowerCamelCase : Optional[int]=True , lowerCamelCase : List[str]=[0.5, 0.5, 0.5] , lowerCamelCase : Any=[0.5, 0.5, 0.5] , lowerCamelCase : List[Any]=False , ) -> Optional[Any]:
lowerCAmelCase_ : List[str] = size if size is not None else {'''height''': 20, '''width''': 20}
lowerCAmelCase_ : Optional[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowerCAmelCase_ : List[Any] = parent
lowerCAmelCase_ : Optional[int] = batch_size
lowerCAmelCase_ : Optional[Any] = num_channels
lowerCAmelCase_ : Any = image_size
lowerCAmelCase_ : Any = min_resolution
lowerCAmelCase_ : List[str] = max_resolution
lowerCAmelCase_ : int = do_resize
lowerCAmelCase_ : Dict = size
lowerCAmelCase_ : Tuple = do_center_crop
lowerCAmelCase_ : List[Any] = crop_size
lowerCAmelCase_ : Optional[Any] = do_normalize
lowerCAmelCase_ : Any = image_mean
lowerCAmelCase_ : Union[str, Any] = image_std
lowerCAmelCase_ : Tuple = do_reduce_labels
def __lowercase ( self : List[str] ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : int = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
lowerCAmelCase_ : int = Image.open(dataset[0]["""file"""] )
lowerCAmelCase_ : Optional[int] = Image.open(dataset[1]["""file"""] )
return image, map
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
lowerCAmelCase_ : Tuple = Image.open(ds[0]["""file"""] )
lowerCAmelCase_ : str = Image.open(ds[1]["""file"""] )
lowerCAmelCase_ : int = Image.open(ds[2]["""file"""] )
lowerCAmelCase_ : str = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __snake_case ( _UpperCAmelCase ,unittest.TestCase):
"""simple docstring"""
lowercase = BeitImageProcessor if is_vision_available() else None
def __lowercase ( self : List[Any] ) -> Optional[Any]:
lowerCAmelCase_ : List[Any] = BeitImageProcessingTester(self )
@property
def __lowercase ( self : int ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
def __lowercase ( self : Optional[int] ) -> str:
lowerCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase_ )
lowerCAmelCase_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCamelCase_ )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase_ )
def __lowercase ( self : str ) -> Optional[Any]:
pass
def __lowercase ( self : Any ) -> Any:
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase_ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase_ : Any = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowercase ( self : Any ) -> Dict:
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase_ : Dict = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowercase ( self : str ) -> str:
lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
lowerCAmelCase_ : Tuple = []
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
# Test batched
lowerCAmelCase_ : List[str] = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
# Test not batched input (PIL images)
lowerCAmelCase_ : Tuple = prepare_semantic_single_inputs()
lowerCAmelCase_ : List[str] = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
# Test batched input (PIL images)
lowerCAmelCase_ : str = prepare_semantic_batch_inputs()
lowerCAmelCase_ : Union[str, Any] = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
def __lowercase ( self : int ) -> List[str]:
lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowerCAmelCase_ : Dict = prepare_semantic_single_inputs()
lowerCAmelCase_ : Any = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 1_50 )
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : List[Any] = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
| 275 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase__ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
lowerCamelCase__ : Optional[Any] = "CompVis/stable-diffusion-v1-2"
lowerCamelCase__ : Dict = "CompVis/stable-diffusion-v1-3"
lowerCamelCase__ : List[str] = "CompVis/stable-diffusion-v1-4"
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :AutoencoderKL , lowerCamelCase_ :CLIPTextModel , lowerCamelCase_ :CLIPTokenizer , lowerCamelCase_ :UNetaDConditionModel , lowerCamelCase_ :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase_ :StableDiffusionSafetyChecker , lowerCamelCase_ :CLIPImageProcessor , lowerCamelCase_ :bool = True , ) -> List[str]:
'''simple docstring'''
        super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , requires_safety_checker=lowerCamelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self :Dict ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , lowerCamelCase_ ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[Union[str, int]] = "auto" ) -> Tuple:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase_ )
@torch.no_grad()
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[str] , ) -> Tuple:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Tuple , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Dict , ) -> List[str]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[Any] , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Optional[Any] , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(lowerCamelCase_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Tuple = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : Union[str, Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
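if __name__ == "__main__":
    # A usage sketch for the four-checkpoint comparison pipeline above (this
    # assumes it is available as the "stable_diffusion_comparison" community
    # pipeline in diffusers; running it downloads all four CompVis v1.x
    # checkpoints, so ample disk space and a GPU are strongly recommended).
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="stable_diffusion_comparison",
    )
    pipe.enable_attention_slicing()
    images = pipe(prompt="an astronaut riding a horse on mars").images  # one image per checkpoint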
| 698 | 0 |
from torch import nn


class ClassificationHead(nn.Module):
    """A single linear layer mapping pooled embeddings to class logits."""

    def __init__(self, class_size, embed_size) -> None:
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
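if __name__ == "__main__":
    # A minimal usage sketch: project a batch of pooled embeddings to logits.
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    hidden_state = torch.randn(2, 768)  # batch of 2 pooled embeddings
    logits = head(hidden_state)
    print(logits.shape)  # torch.Size([2, 5])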
| 147 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: one pass per call, recursing until a pass makes no swaps.

    >>> bubble_sort([5, 1, 4, 2, 8])
    [1, 2, 4, 5, 8]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
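    # A quick sanity-check sketch for the recursive bubble sort above: the
    # result should match Python's built-in sorted() on any list of comparables.
    import random

    sample = [random.randint(0, 100) for _ in range(20)]
    assert bubble_sort(list(sample)) == sorted(sample)
    print("bubble_sort agrees with sorted() on a random sample")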
| 698 | 0 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Histogram of total frequencies for `dice_number` dice with `sides_number` faces."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_dice_value = 1
    dice_values = range(min_dice_value, max_face_number + 1)
    for dice_numbers in product(dice_values, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''')
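    # Sanity-check sketch for the helper above: the totals histogram for nine
    # 4-sided dice must account for exactly 4**9 equally likely outcomes.
    frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    assert sum(frequencies) == 4**9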
| 657 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :List[str] , lowerCamelCase_ :UNetaDModel , lowerCamelCase_ :ScoreSdeVeScheduler ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self :int , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 20_00 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , **lowerCamelCase_ :Union[str, Any] , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.sample_size
SCREAMING_SNAKE_CASE : List[str] = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE : Any = self.unet
SCREAMING_SNAKE_CASE : Dict = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step_correct(lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step_pred(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE : List[str] = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
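if __name__ == "__main__":
    # A usage sketch for the SDE-VE pipeline above (assumes the diffusers
    # library and the public "google/ncsnpp-church-256" checkpoint; the default
    # 2000 sampling steps are slow without a GPU).
    from diffusers import ScoreSdeVePipeline

    sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
    image = sde_ve(num_inference_steps=2_000).images[0]
    image.save("sde_ve_generated_image.png")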
| 698 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "vocab.txt"}
a_ = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a_ = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def __UpperCAmelCase ( __UpperCamelCase ):
with open(a_ , '''r''' ) as f:
__lowercase : Tuple = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase_ ( _UpperCAmelCase ):
UpperCamelCase =VOCAB_FILES_NAMES
UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<unk>" , UpperCamelCase_="<cls>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_="<eos>" , **UpperCamelCase_ , ) -> Optional[int]:
super().__init__(**lowerCamelCase_ )
__lowercase : str = load_vocab_file(lowerCamelCase_ )
__lowercase : Optional[int] = dict(enumerate(self.all_tokens ) )
__lowercase : List[Any] = {tok: ind for ind, tok in enumerate(self.all_tokens )}
__lowercase : Union[str, Any] = unk_token
__lowercase : List[str] = cls_token
__lowercase : Optional[int] = pad_token
__lowercase : Dict = mask_token
__lowercase : Dict = eos_token
__lowercase : Optional[Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def _lowerCamelCase ( self , UpperCamelCase_ , **UpperCamelCase_ ) -> Dict:
return text.split()
def _lowerCamelCase ( self , UpperCamelCase_=False ) -> Any:
return len(self._id_to_token )
def _lowerCamelCase ( self ) -> List[Any]:
return {token: i for i, token in enumerate(self.all_tokens )}
def _lowerCamelCase ( self , UpperCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]:
__lowercase : Tuple = [self.cls_token_id]
__lowercase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
__lowercase : Any = [1] + ([0] * len(lowerCamelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase_ ) + [1]
return mask
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
__lowercase : Tuple = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def _lowerCamelCase ( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowerCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = False ) -> int:
return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ )
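if __name__ == "__main__":
    # A minimal usage sketch (assumes the transformers library and network
    # access; the class above mirrors transformers' EsmTokenizer, so a hosted
    # checkpoint is loaded through the library here).
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    encoded = tokenizer("MKTAYIAKQR")
    print(encoded["input_ids"])  # <cls> + one id per amino-acid residue + <eos>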
| 76 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip qubits 0 and 1 with X gates, measure them, and return the shot counts."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'Total count for various states are: {counts}')
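    # Sanity check: X on both qubits makes the circuit deterministic, so all
    # 1000 shots should land on the "11" bit-string in a noiseless simulator.
    assert counts.get("11", 0) == 1_000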
| 698 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_a : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_a : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="auto" , UpperCAmelCase=-1 , UpperCAmelCase=0.9 , UpperCAmelCase=5 , UpperCAmelCase=5_0_0 , UpperCAmelCase="gpt2-large" , UpperCAmelCase=-1 , UpperCAmelCase=1_0_2_4 , UpperCAmelCase=2_5 , UpperCAmelCase=5 , UpperCAmelCase=True , UpperCAmelCase=2_5 , ):
__lowerCamelCase = compute_mauve(
p_text=lowerCamelCase_ , q_text=lowerCamelCase_ , p_features=lowerCamelCase_ , q_features=lowerCamelCase_ , p_tokens=lowerCamelCase_ , q_tokens=lowerCamelCase_ , num_buckets=lowerCamelCase_ , pca_max_data=lowerCamelCase_ , kmeans_explained_var=lowerCamelCase_ , kmeans_num_redo=lowerCamelCase_ , kmeans_max_iter=lowerCamelCase_ , featurize_model_name=lowerCamelCase_ , device_id=lowerCamelCase_ , max_text_length=lowerCamelCase_ , divergence_curve_discretization_size=lowerCamelCase_ , mauve_scaling_factor=lowerCamelCase_ , verbose=lowerCamelCase_ , seed=lowerCamelCase_ , )
return out
| 479 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ : Optional[int] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def __A ( a_ : Dict )-> str:
'''simple docstring'''
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __A ( a_ : Dict )-> Tuple:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(a_ )
def __A ( a_ : Union[str, Any] )-> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE : List[str] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(a_ , id=a_ )
def __A ( a_ : Dict , a_ : List[str] )-> Dict:
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE : List[str] = 0
# Doctest custom flag to ignore output.
lowerCamelCase__ : Tuple = doctest.register_optionflag("IGNORE_RESULT")
lowerCamelCase__ : Optional[int] = doctest.OutputChecker
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] ) -> Dict:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ : str = CustomOutputChecker
lowerCamelCase__ : Any = HfDoctestModule
lowerCamelCase__ : int = HfDocTestParser
| 698 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SCREAMING_SNAKE_CASE_ (unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self : Tuple ) ->List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
lowerCamelCase_ : Dict = 1
lowerCamelCase_ : Optional[Any] = 3
lowerCamelCase_ : int = (32, 32)
lowerCamelCase_ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def _lowerCAmelCase ( self : Optional[int] ) ->Any:
torch.manual_seed(0 )
lowerCamelCase_ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def _lowerCAmelCase ( self : Tuple ) ->Any:
torch.manual_seed(0 )
lowerCamelCase_ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self : int ) ->Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def _lowerCAmelCase ( self : Dict ) ->Optional[Any]:
def extract(*__a : int , **__a : List[Any] ):
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : int ) ->Any:
lowerCamelCase_ : int = torch.ones([0] )
def _lowerCAmelCase ( self : Tuple , __a : List[str] ) ->Tuple:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def _lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
lowerCamelCase_ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : List[str] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
lowerCamelCase_ : List[str] = self.dummy_vae
lowerCamelCase_ : str = self.dummy_text_encoder
lowerCamelCase_ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase_ : Optional[int] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
lowerCamelCase_ : Dict = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowerCamelCase_ : List[Any] = output.images
lowerCamelCase_ : int = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
lowerCamelCase_ : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCamelCase_ , )[0]
lowerCamelCase_ : int = image[0, -3:, -3:, -1]
lowerCamelCase_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ : List[Any] = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : Any = self.dummy_text_encoder
lowerCamelCase_ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase_ : Dict = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : int = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
lowerCamelCase_ : Dict = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowerCamelCase_ : str = output.images
lowerCamelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
lowerCamelCase_ : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCamelCase_ , )[0]
lowerCamelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ : List[Any] = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
lowerCamelCase_ : str = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
lowerCamelCase_ : Any = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
lowerCamelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCamelCase_ : Optional[int] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
assert image.shape == (1, 64, 64, 3)
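# Note on the fp16 path above: .half() casts each submodule's weights to float16
# after loading. A shorter route (a sketch, not what this test exercises) is to
# cast at load time via the `torch_dtype` argument of `from_pretrained`:
#
#     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
#     pipe = pipe.to("cuda")
#
# where `model_id` is a placeholder for any fp16-capable checkpoint.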
@nightly
@require_torch_gpu
class StableDiffusionPipelineSafeNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4_003_660_346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2_734_971_755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1_044_355_234
        guidance_scale = 12

        # without safety guidance (sld_guidance_scale = 0); the built-in safety
        # checker blacks out the image, hence the all-zero expected slice
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
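# For reference, the safe-latent-diffusion (SLD) knobs exercised by the nightly
# tests above (informal summaries based on the Safe Latent Diffusion paper, not
# authoritative library documentation):
#   sld_guidance_scale  - strength of the safety guidance term; 0 disables SLD,
#                         2_000 is the "strong" configuration used above
#   sld_warmup_steps    - diffusion steps to run before safety guidance kicks in
#   sld_threshold       - threshold separating safe from unsafe latent directions
#   sld_momentum_scale  - scale of the momentum term accumulated across steps
#   sld_mom_beta        - beta of the momentum's exponential moving average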
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 3_84
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 1_28
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 7_68]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
                    [0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
                    [0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
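# A note on the `num_attention_heads / 2` in the attention-shape checks above:
# ConvBERT's default head_ratio is 2, so half of the self-attention heads are
# replaced by the span-based dynamic convolution branch and only
# num_attention_heads / head_ratio classical attention maps are returned per
# layer (an informal gloss of the ConvBERT design, not a spec).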
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
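# The _LazyModule registered above defers the import of `tokenization_tapex`
# until an attribute is first accessed. A minimal stand-alone sketch of the same
# idea (a simplified illustration, not the actual transformers implementation):
#
#     import importlib
#     import types
#
#     class LazyProxy(types.ModuleType):
#         def __init__(self, name, target):
#             super().__init__(name)
#             self._target, self._module = target, None
#
#         def __getattr__(self, item):
#             if self._module is None:
#                 self._module = importlib.import_module(self._target)
#             return getattr(self._module, item)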
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
'''simple docstring'''
UpperCamelCase = """bert"""
    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
'''simple docstring'''
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
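# Usage sketch for the ONNX export config above (the task string and attribute
# access mirror the OnnxConfig API; treat the exact values as illustrative):
#
#     config = BertConfig()  # defaults mirror bert-base-uncased
#     onnx_config = BertOnnxConfig(config, task="sequence-classification")
#     onnx_config.inputs  # OrderedDict mapping each input name to its dynamic axes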
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ = '''lower newer'''
# Testing tokenization
lowerCamelCase__ = tokenizer.tokenize(lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing conversion to ids without special tokens
lowerCamelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing conversion to ids with special tokens
lowerCamelCase__ = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ = tokenizer.encode(lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing the unknown token
lowerCamelCase__ = tokens + [rust_tokenizer.unk_token]
lowerCamelCase__ = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
pass
def __lowerCamelCase ( self , __lowerCAmelCase=1_5 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
# Simple input
lowerCamelCase__ = '''This is a simple input'''
lowerCamelCase__ = ['''This is a simple input 1''', '''This is a simple input 2''']
lowerCamelCase__ = ('''This is a simple input''', '''This is a pair''')
lowerCamelCase__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' , )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
lowerCamelCase__ = '''This is a simple input'''
lowerCamelCase__ = ['''This is a simple input looooooooong''', '''This is a simple input''']
lowerCamelCase__ = ('''This is a simple input''', '''This is a pair''')
lowerCamelCase__ = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
lowerCamelCase__ = tokenizer.pad_token_id
lowerCamelCase__ = tokenizer(lowerCamelCase_ , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
lowerCamelCase__ = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , truncate=lowerCamelCase_ , return_tensors='''np''' )
lowerCamelCase__ = tokenizer(*lowerCamelCase_ , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
lowerCamelCase__ = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , truncate=lowerCamelCase_ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = '''$$$'''
lowerCamelCase__ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCamelCase_ , add_bos_token=lowerCamelCase_ )
lowerCamelCase__ = '''This is a simple input'''
lowerCamelCase__ = ['''This is a simple input 1''', '''This is a simple input 2''']
lowerCamelCase__ = tokenizer.bos_token_id
lowerCamelCase__ = tokenizer(lowerCamelCase_ )
lowerCamelCase__ = tokenizer(lowerCamelCase_ )
self.assertEqual(out_s.input_ids[0] , lowerCamelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase__ = tokenizer.decode(out_s.input_ids )
lowerCamelCase__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCamelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = [self.get_tokenizer(do_lower_case=lowerCamelCase_ , add_bos_token=lowerCamelCase_ )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCamelCase__ = '''Encode this.'''
lowerCamelCase__ = '''This one too please.'''
lowerCamelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
encoded_sequence += tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ = tokenizer.encode_plus(
lowerCamelCase_ , lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , )
lowerCamelCase__ = encoded_sequence_dict['''input_ids''']
lowerCamelCase__ = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
lowerCamelCase__ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCamelCase_ )
]
lowerCamelCase__ = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
'''simple docstring'''
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCamelCase_ )
lowerCamelCase__ = '''A photo of a cat'''
lowerCamelCase__ = tokenizer.encode(
lowerCamelCase_ , )
self.assertEqual(lowerCamelCase_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''test_opt''' )
lowerCamelCase__ = AutoTokenizer.from_pretrained('''./test_opt''' )
lowerCamelCase__ = tokenizer.encode(
lowerCamelCase_ , )
self.assertEqual(lowerCamelCase_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=lowerCamelCase_ )
lowerCamelCase__ = '''A photo of a cat'''
lowerCamelCase__ = tokenizer.encode(
lowerCamelCase_ , )
# Same as above
self.assertEqual(lowerCamelCase_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCamelCase_ )
lowerCamelCase__ = '''bos'''
lowerCamelCase__ = tokenizer.get_vocab()['''bos''']
lowerCamelCase__ = '''A photo of a cat'''
lowerCamelCase__ = tokenizer.encode(
lowerCamelCase_ , )
# We changed the bos token
self.assertEqual(lowerCamelCase_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''./tok''' )
lowerCamelCase__ = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
lowerCamelCase__ = tokenizer.encode(
lowerCamelCase_ , )
self.assertEqual(lowerCamelCase_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
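def _example_byte_level_space_marker():
    # Illustration (not part of the test suite) of why the fixture vocab above is
    # full of "\u0120"-prefixed tokens: GPT-2's byte-level BPE maps every raw byte
    # to a printable character, and the space byte 0x20 lands on "Ġ"
    # (U+0120 == 0x20 + 0x100).
    text = " lower"
    # mimic the byte-to-unicode step for the space character only (simplified)
    return "".join(chr(ord(ch) + 0x100) if ch == " " else ch for ch in text)  # -> "Ġlower"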
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
'''simple docstring'''
UpperCamelCase = """luke"""
    def __init__(
        self,
        vocab_size=5_02_67,
        entity_vocab_size=50_00_00,
        hidden_size=7_68,
        entity_emb_size=2_56,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
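# Minimal usage sketch for the config above:
#
#     config = LukeConfig(entity_vocab_size=10, entity_emb_size=32)
#     config.use_entity_aware_attention  # True by default
#
# `entity_emb_size` may differ from `hidden_size`; the model projects entity
# embeddings up to the hidden size internally (informal note, not a spec).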
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
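# Sketch of how this trainer is typically wired up in the question-answering
# example scripts (names follow the run_qa.py conventions and are assumptions
# here, not part of this module):
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,  # raw, un-tokenized examples
#         post_process_function=post_processing_function,  # logits -> text answers
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()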
"""simple docstring"""
# using dfs for finding eulerian path traversal
def __A ( a_ : Dict , a_ : int , a_ : str , a_ : Optional[Any]=None )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = True, True
SCREAMING_SNAKE_CASE : List[str] = dfs(a_ , a_ , a_ , a_ )
return path
def __A ( a_ : List[str] , a_ : Any )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = -1
for i in range(a_ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
SCREAMING_SNAKE_CASE : Tuple = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def __A ( a_ : Any , a_ : int )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = check_circuit_or_path(a_ , a_ )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
SCREAMING_SNAKE_CASE : Tuple = 1
if check == 2:
SCREAMING_SNAKE_CASE : Optional[int] = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
SCREAMING_SNAKE_CASE : Optional[int] = dfs(a_ , a_ , a_ )
print(a_ )
def __A ( )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
SCREAMING_SNAKE_CASE : str = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
SCREAMING_SNAKE_CASE : str = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
SCREAMING_SNAKE_CASE : int = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
SCREAMING_SNAKE_CASE : int = {
1: [],
2: []
# all degree is zero
}
SCREAMING_SNAKE_CASE : List[str] = 10
check_euler(a_ , a_ )
check_euler(a_ , a_ )
check_euler(a_ , a_ )
check_euler(a_ , a_ )
check_euler(a_ , a_ )
if __name__ == "__main__":
main()
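# Expected behaviour on the sample graphs above (worked out by hand from the
# degree counts):
#   g1: vertices 1 and 5 have odd degree   -> Euler path, dfs starts at vertex 5
#   g2: every vertex has even degree       -> Euler cycle starting from vertex 1
#   g3: four vertices have odd degree      -> not Eulerian, "no path" is printed
#   g4: a triangle, all degrees even       -> Euler cycle
#   g5: no edges, zero odd-degree vertices -> degenerate cycle, path is just [1]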
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
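# Worked example for the closed form used above, S = n/2 * (2a + (n - 1)d):
#   a = 1, d = 1, n = 10  ->  S = 10/2 * (2*1 + 9*1) = 5 * 11 = 55.0
# which matches the doctest result printed by main().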
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''<s>'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_54)
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
    @classmethod
    def setUpClass(cls) -> None:
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 25_00_01)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 25_00_04)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 25_00_20)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 25_00_38)
    def test_tokenizer_batch_encode_plus(self) -> None:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self) -> None:
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self) -> None:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [25_00_53, 25_00_01])
    def test_special_tokens_unaffected_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self) -> None:
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self) -> None:
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self) -> None:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[25_00_04, 62, 30_34, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_00_01,
            },
        )
| 698 | 0 |
def perfect_cube(n: int) -> bool:
    """Return True if `n` is the cube of an integer."""
    # Round the floating-point cube root: a raw `n ** (1 / 3)` comparison fails
    # for exact cubes (e.g. 27 ** (1 / 3) == 3.0000000000000004).
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
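    # Illustrative extra checks (an added sketch, not part of the original script):
    # every exact cube should be recognised and off-by-one values rejected.
    assert all(perfect_cube(k**3) for k in range(1, 101))
    assert not any(perfect_cube(k**3 + 1) for k in range(2, 101))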
| 105 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._create_example_records()
SCREAMING_SNAKE_CASE : List[Any] = Dataset.from_list(lowerCamelCase_ )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(lowerCamelCase_ ):
self.assertDictEqual(lowerCamelCase_ , example_records[i] )
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self._create_example_records()
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def __lowerCAmelCase ( self :List[str] ) -> Dict: # checks what happens with missing columns
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE : List[str] = Dataset.from_list(lowerCamelCase_ )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = Dataset.from_list([] )
self.assertEqual(len(lowerCamelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
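
# Minimal usage sketch (added for illustration; it mirrors what the tests above
# exercise -- `Dataset.from_list` infers the schema from the first record):
#
#     records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
#     ds = Dataset.from_list(records)
#     print(ds.column_names)  # ['col_1', 'col_2']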
| 698 | 0 |
"""Dijkstra shortest path on a 0/1 grid, where 1 marks a walkable cell."""
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        dist, (x, y) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
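
# Illustrative example (an added sketch, not part of the original module):
# shortest path across a 3x3 grid where 1 marks a walkable cell.
#
#     grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#     dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#     # dist == 4.0, path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]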
| 275 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 1_00,
) -> float:
    """Approximate the arc length of `fnc` between `x_start` and `x_end`."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
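
# Quick validation sketch (added, not in the original script): for the straight
# line f(x) = x from 0 to 1 every chord lies exactly on the curve, so even a
# coarse approximation returns the exact length sqrt(2):
#
#     print(line_length(lambda x: x, 0, 1, 10))  # 1.4142135623730954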
| 698 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowerCAmelCase = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
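
# Note (added): with the `_LazyModule` indirection above, importing the package
# stays cheap -- the heavy torch/flax model code is only imported when one of
# the names listed in `_import_structure` is first accessed.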
| 147 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
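
# Example invocation (an added sketch; `--config_file` is the flag registered
# above and the path shown is only illustrative):
#
#     accelerate test --config_file /path/to/default_config.yaml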
| 698 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the image processor configuration for a checkpoint, returning an empty dict if none is found."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
def __init__( self ) -> Optional[int]:
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor
        # config and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for this auto class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
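
# Usage sketch (added for illustration; the checkpoint name is just an example):
#
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     inputs = image_processor(images=some_pil_image, return_tensors="pt")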
| 657 |
"""simple docstring"""
def __A ( a_ : int = 10 , a_ : int = 10_00 , a_ : bool = True )-> int:
'''simple docstring'''
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and isinstance(a_ , a_ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
return min_val if option else max_val
def __A ( a_ : int , a_ : int )-> int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def __A ( a_ : int , a_ : int , a_ : int )-> None:
'''simple docstring'''
assert (
isinstance(a_ , a_ ) and isinstance(a_ , a_ ) and isinstance(a_ , a_ )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
def answer(a_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = lower
SCREAMING_SNAKE_CASE : int = higher
SCREAMING_SNAKE_CASE : List[str] = []
while True:
SCREAMING_SNAKE_CASE : Any = get_avg(a_ , a_ )
last_numbers.append(a_ )
if answer(a_ ) == "low":
SCREAMING_SNAKE_CASE : Dict = number
elif answer(a_ ) == "high":
SCREAMING_SNAKE_CASE : Tuple = number
else:
break
print(F"guess the number : {last_numbers[-1]}" )
print(F"details : {last_numbers!s}" )
def __A ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = int(input('''Enter lower value : ''' ).strip() )
SCREAMING_SNAKE_CASE : Tuple = int(input('''Enter high value : ''' ).strip() )
SCREAMING_SNAKE_CASE : List[str] = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(a_ , a_ , a_ )
if __name__ == "__main__":
main()
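
# Note (added): `guess_the_number` halves the search interval on every guess,
# so it needs at most about log2(higher - lower) attempts, e.g.:
#
#     guess_the_number(1, 1_000, 500)  # converges in roughly 10 guesses or fewer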
| 698 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 76 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq wav2vec2 + mBART-50 checkpoint weights into the transformers design."""
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 25_00_04
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
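
# Example invocation (an added sketch; file paths are placeholders, the flags
# are exactly the ones registered in the argument parser above):
#
#     python convert_checkpoint.py \
#         --checkpoint_path /path/to/checkpoint.pt \
#         --dict_path /path/to/dict.txt \
#         --config_yaml_path /path/to/config.yaml \
#         --pytorch_dump_folder_path ./converted_model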
| 698 | 0 |
_a : Tuple = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_a : List[Any] = ["a", "b", "c", "d", "e"]
def UpperCamelCase__ ( _A: Dict , _A: Any , _A: List[Any] ):
'''simple docstring'''
__lowerCamelCase = start
# add current to visited
visited.append(a_ )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(a_ , a_ , a_ )
# if all neighbors visited add current to sort
sort.append(a_ )
# if all vertices haven't been visited select a new one to visit
if len(a_ ) != len(a_ ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(a_ , a_ , a_ )
# return sort
return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort)
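
# Note (added): with the graph defined above this should print
# ['c', 'd', 'e', 'b', 'a']; reversing it gives a valid topological order
# (a before b, and b before d and e).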
| 479 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 698 | 0 |
import math
def is_prime(number: int) -> bool:
    """Deterministic primality check using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral for which the ratio of primes
    along both diagonals first falls below `ratio` (Project Euler problem 58)."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)  # bools count as 0/1 here
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
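
# Quick smoke test (an added sketch): with ratio = 0.5 the prime ratio along
# the spiral diagonals first drops below one half at side length 11, so
# `solution(0.5)` should return 11 (the actual Euler problem uses ratio 0.1).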
| 278 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCamelCase__ : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCamelCase__ : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute(
        self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None,
        num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=5_00,
        featurize_model_name="gpt2-large", device_id=-1, max_text_length=10_24,
        divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25,
    ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features,
            p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
| 698 | 0 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int):
    """Return both roots of a*x^2 + b*x + c = 0, as reals when the discriminant allows."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
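
# Worked example (added): for 5x^2 + 6x + 1 = 0 the discriminant is
# 6*6 - 4*5*1 = 16, so the roots are (-6 +/- 4) / 10 = -0.2 and -1.0,
# which is exactly what `main()` above prints.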
| 664 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""Fast DPR context-encoder tokenizer; identical to BertTokenizerFast with DPR checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""Fast DPR question-encoder tokenizer; identical to BertTokenizerFast with DPR checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase )
class CustomDPRReaderTokenizerMixin:
    """Mixin adding DPR reader encoding and best-span decoding on top of a fast tokenizer."""
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        """Encode one question together with passage titles and texts for a DPR reader."""
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        """Return the best answer spans across passages, ordered by descending relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        """Score every candidate (start, end) pair and keep the top non-overlapping spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""Fast DPR reader tokenizer: BertTokenizerFast plus reader encoding and span decoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
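# Illustrative usage (an addition; the checkpoint name is one of the standard
# DPR reader models listed in the maps above):
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     # After a forward pass of a DPRReader model over encoded_inputs:
#     # predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)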
| 698 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates, measure them, and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 481 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a MarkupLM model."""

    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 698 | 0 |
"""Project Euler 800: count hybrid-integers p**q * q**p not exceeding 800800**800800."""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers strictly below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count pairs of distinct primes (p, q), p < q, with p**q * q**p <= base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Compare in log space: log2(p**q * q**p) = q * log2(p) + p * log2(q).
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
| 78 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration class to store the configuration of a ResNetModel."""

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
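# Illustrative usage (an addition, assuming the classes above):
#
#     config = ResNetConfig(layer_type="bottleneck", out_features=["stage4"])
#     onnx_config = ResNetOnnxConfig(config)
#     list(onnx_config.inputs)  # -> ['pixel_values']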
| 698 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key by subtracting the offset from the original block number."""
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
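# Worked example (an illustrative addition): with an offset of 1, the key
# "poolformer.encoder.1.2.mlp.fc1.weight" maps to
# "poolformer.encoder.block.0.2.output.conv1.weight", i.e. the block number
# is shifted down by the number of patch-embedding layers seen so far:
#
#     replace_key_with_offset("poolformer.encoder.1.2.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")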
def rename_keys(state_dict):
    """Rename original PoolFormer checkpoint keys to the HuggingFace layout."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network'''):
            key = key.replace('''network''', '''poolformer.encoder''')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''')]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace('''proj''', '''projection''')
            if key.endswith('''bias'''):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc1''', '''output.conv1''')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc2''', '''output.conv2''')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm1''', '''before_norm''')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm2''', '''after_norm''')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_1''', '''layer_scale_1''')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_2''', '''layer_scale_2''')
        if "head" in key:
            key = key.replace('''head''', '''classifier''')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download an image of two cats on which the converted model is verified."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original PoolFormer weights into the HuggingFace structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 1000
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='''pt''').pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('''cpu'''))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='''pt''').pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
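# Example invocation (an illustrative addition; the script filename and the
# checkpoint path are placeholders):
#
#     python convert_poolformer_checkpoint.py \
#         --model_name poolformer_s12 \
#         --checkpoint_path /path/to/poolformer_s12.pth.tar \
#         --pytorch_dump_folder_path ./poolformer_s12_hf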
| 315 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a MraModel."""

    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 698 | 0 |
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the ITU-R 601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean (binary) image."""
    return (gray > 1_27) & (gray <= 2_55)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert('''RGB''')
    pil_img.save('''result_dilation.png''')
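    # Tiny worked example (an illustrative addition): a single foreground
    # pixel grows into the plus-shaped structuring element under dilation.
    tiny = np.zeros((5, 5))
    tiny[2, 2] = 1
    print(dilation(tiny, structuring_element))  # plus-shaped block of ones at the centre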
| 105 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an NllbMoeModel."""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 698 | 0 |
'''simple docstring'''
__A : Dict = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
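# Illustrative usage of one utility re-exported above (an addition): the
# decorator retries its function with a halved batch size on CUDA OOM errors.
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # runs again with batch_size // 2 if an out-of-memory error is raised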
| 275 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase__ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
lowerCamelCase__ : Optional[Any] = "CompVis/stable-diffusion-v1-2"
lowerCamelCase__ : Dict = "CompVis/stable-diffusion-v1-3"
lowerCamelCase__ : List[str] = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    r"""Pipeline that loads all four Stable Diffusion v1.x checkpoints for side-by-side comparison."""

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
@property
def __lowerCAmelCase ( self :Dict ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , lowerCamelCase_ ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[Union[str, int]] = "auto" ) -> Tuple:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase_ )
@torch.no_grad()
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[str] , ) -> Tuple:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Tuple , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Dict , ) -> List[str]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[Any] , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Optional[Any] , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(lowerCamelCase_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Tuple = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : Union[str, Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 698 | 0 |
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    """Mimics DownloadManager but resolves URLs to files inside the local dummy_data.zip archive."""

    dummy_data_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = True , lowerCamelCase_ = None , ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = dataset_name
_UpperCamelCase = cache_dir
_UpperCamelCase = use_local_dummy_data
_UpperCamelCase = config
# download_callbacks take a single url as input
_UpperCamelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_UpperCamelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_UpperCamelCase = str(lowerCamelCase_ )
# to be downloaded
_UpperCamelCase = None
_UpperCamelCase = None
@property
def lowercase ( self ) -> str:
"""simple docstring"""
if self._dummy_file is None:
_UpperCamelCase = self.download_dummy_data()
return self._dummy_file
@property
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def lowercase ( self ) -> List[str]:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def lowercase ( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_UpperCamelCase = cached_path(
lowerCamelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCamelCase_ , force_extract=lowerCamelCase_ )
return os.path.join(lowerCamelCase_ , self.dummy_file_name )
@property
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowercase ( self ) -> List[str]:
"""simple docstring"""
if self._bucket_url is None:
_UpperCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def lowercase ( self , lowerCamelCase_ , *lowerCamelCase_ ) -> int:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_UpperCamelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_UpperCamelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return self.create_dummy_data_dict(lowerCamelCase_ , lowerCamelCase_ )
elif isinstance(lowerCamelCase_ , (list, tuple) ):
return self.create_dummy_data_list(lowerCamelCase_ , lowerCamelCase_ )
else:
return self.create_dummy_data_single(lowerCamelCase_ , lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ , *lowerCamelCase_ ) -> Tuple:
"""simple docstring"""
return self.download_and_extract(lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> int:
"""simple docstring"""
return self.download_and_extract(lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) -> str:
"""simple docstring"""
return path
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
return {}
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
for single_url in single_urls:
download_callback(lowerCamelCase_ )
else:
_UpperCamelCase = single_urls
download_callback(lowerCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_UpperCamelCase = [os.path.join(lowerCamelCase_ , urllib.parse.quote_plus(Path(lowerCamelCase_ ).name ) ) for x in single_urls]
else:
_UpperCamelCase = single_urls
_UpperCamelCase = os.path.join(lowerCamelCase_ , urllib.parse.quote_plus(Path(lowerCamelCase_ ).name ) )
_UpperCamelCase = value
# make sure that values are unique
if all(isinstance(lowerCamelCase_ , lowerCamelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_UpperCamelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_UpperCamelCase = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowerCamelCase_ ) ) for url in data_url )
_UpperCamelCase = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_UpperCamelCase = [data_url[0]] * len(lowerCamelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowerCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCamelCase = os.path.join(lowerCamelCase_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(lowerCamelCase_ )
return dummy_data_list
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(lowerCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCamelCase = os.path.join(lowerCamelCase_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(lowerCamelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowercase ( self ) -> str:
"""simple docstring"""
pass
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
pass
def lowercase ( self , lowerCamelCase_ ) -> Optional[int]:
"""simple docstring"""
def _iter_archive_members(lowerCamelCase_ ):
# this preserves the order of the members inside the ZIP archive
_UpperCamelCase = Path(self.dummy_file ).parent
_UpperCamelCase = path.relative_to(lowerCamelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_UpperCamelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(lowerCamelCase_ )
_UpperCamelCase = Path(lowerCamelCase_ )
_UpperCamelCase = _iter_archive_members(lowerCamelCase_ ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(lowerCamelCase_ ).as_posix(), file_path.open("rb" )
def lowercase ( self , lowerCamelCase_ ) -> Any:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_UpperCamelCase = [paths]
for path in paths:
if os.path.isfile(lowerCamelCase_ ):
if os.path.basename(lowerCamelCase_ ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowerCamelCase_ ):
if os.path.basename(lowerCamelCase_ ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(lowerCamelCase_ ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(lowerCamelCase_ , lowerCamelCase_ )
| 147 |
"""simple docstring"""
def __A ( a_ : list , a_ : int = 0 )-> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = length or len(a_ )
SCREAMING_SNAKE_CASE : List[Any] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = list_data[i + 1], list_data[i]
SCREAMING_SNAKE_CASE : Optional[Any] = True
return list_data if not swapped else bubble_sort(a_ , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
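    # Illustrative usage (an addition): each recursive call scans one fewer
    # element, and the recursion stops as soon as a pass makes no swap.
    print(bubble_sort([4, 1, 3, 2]))  # -> [1, 2, 3, 4]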
| 698 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
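# Note (an illustrative addition): with the _LazyModule pattern above, the
# torch-backed submodule is only imported on first attribute access, e.g.
#
#     from transformers.models.swinv2 import Swinv2Config  # cheap, config only
#     from transformers.models.swinv2 import Swinv2Model   # triggers the torch import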
| 657 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :List[str] , lowerCamelCase_ :UNetaDModel , lowerCamelCase_ :ScoreSdeVeScheduler ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
    def __call__( self :int , batch_size :int = 1 , num_inference_steps :int = 20_00 , generator :Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type :Optional[str] = "pil" , return_dict :bool = True , **kwargs :Union[str, Any] , ) -> Union[ImagePipelineOutput, Tuple]:
        '''Run the corrector (annealed Langevin) / predictor (reverse SDE) sampling loop.'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
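# Hedged usage sketch (assumptions: the checkpoint name and step count below are
# illustrative and do not appear in the original file):
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = pipe(num_inference_steps=2000).images[0]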
| 698 | 0 |
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the sifted key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
return key
if __name__ == "__main__":
print(F"The generated key is : {bbaa(8, seed=0)}")
from doctest import testmod
testmod()
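    # Hedged determinism check (assumption: this check is not in the original file):
    # the simulator is seeded, so a fixed seed must reproduce the same key.
    assert bb84(8, seed=0) == bb84(8, seed=0)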
| 76 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    '''Flip two qubits with X gates, measure them, and return the outcome counts.'''
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
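# Hedged note (assumption: not in the original file): with both qubits flipped by X,
# all 1000 shots should land on the '11' outcome, i.e. counts == {'11': 1000}.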
| 698 | 0 |
def check_bouncy(n: int) -> bool:
    '''Return True if n is bouncy: its digits are neither increasing nor decreasing.'''
    if not isinstance(n, int):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99) -> int:
    '''Return the least number at which the proportion of bouncy numbers reaches percent.'''
    if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 479 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ : Optional[int] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    '''Register the custom markers used across the test suite.'''
    config.addinivalue_line(
        '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
    config.addinivalue_line(
        '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
    config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
    config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
    config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
    config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    '''Forward shared command-line options to pytest.'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    '''Optionally write the detailed test reports at the end of the session.'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    '''Treat "no tests collected" (exit code 5) as success.'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
lowerCamelCase__ : Tuple = doctest.register_optionflag("IGNORE_RESULT")
lowerCamelCase__ : Optional[int] = doctest.OutputChecker
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] ) -> Dict:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ : str = CustomOutputChecker
lowerCamelCase__ : Any = HfDoctestModule
lowerCamelCase__ : int = HfDocTestParser
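# Hedged usage sketch (assumption: illustrative, not part of the original conftest):
# a doctest can opt out of output comparison via the custom flag, e.g.
#
#   >>> torch.cuda.device_count()  # doctest: +IGNORE_RESULT
#   2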
| 698 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
snake_case__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
snake_case__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    '''simple docstring'''
    def __call__( self : str , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs : Tuple , ) ->BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F'''There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.'''
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self : Optional[Any] , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ) ->List[DPRSpanPrediction]:
        input_ids = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self : Optional[int] , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ) ->List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
            length = end_index - start_index + 1
            assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
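# Hedged usage sketch (assumptions: the checkpoint name, inputs, and `model` are
# illustrative and do not appear in the original file):
# tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded = tokenizer(questions="Who wrote Hamlet?", titles="Hamlet",
#                     texts="Hamlet is a tragedy by William Shakespeare.", return_tensors="pt")
# best_spans = tokenizer.decode_best_spans(encoded, model(**encoded))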
| 278 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase__:
'''simple docstring'''
    def __init__( self :Tuple , parent :Tuple , batch_size :Tuple=13 , seq_length :List[str]=7 , is_training :Dict=True , use_input_mask :List[Any]=True , use_token_type_ids :List[str]=True , use_labels :Dict=True , vocab_size :str=99 , hidden_size :Optional[Any]=32 , num_hidden_layers :Tuple=2 , num_attention_heads :int=4 , intermediate_size :Optional[Any]=37 , hidden_act :Any="gelu" , hidden_dropout_prob :Any=0.1 , attention_probs_dropout_prob :Any=0.1 , max_position_embeddings :Optional[int]=5_12 , type_vocab_size :str=16 , type_sequence_label_size :Optional[int]=2 , initializer_range :List[str]=0.0_2 , num_labels :int=3 , num_choices :List[Any]=4 , scope :Optional[Any]=None , ) -> Any:
        '''Store the tester hyperparameters (arguments are accepted for API parity but overridden below).'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 3_84
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 1_28
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[str] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self :str , config :Optional[int] , input_ids :int , token_type_ids :List[str] , input_mask :Union[str, Any] , sequence_labels :int , token_labels :List[Any] , choice_labels :Tuple ) -> Dict:
        '''simple docstring'''
        model = TFConvBertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFConvBertForMaskedLM(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = TFConvBertForSequenceClassification(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertForMultipleChoice(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Any = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForTokenClassification(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TFConvBertForQuestionAnswering(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Tuple = True
if hasattr(lowerCamelCase_ , '''use_cache''' ):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : str = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = len(model(lowerCamelCase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , '''saved_model''' , '''1''' )
SCREAMING_SNAKE_CASE : Tuple = tf.keras.models.load_model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Optional[int] = outputs['''encoder_hidden_states''']
SCREAMING_SNAKE_CASE : str = outputs['''encoder_attentions''']
else:
SCREAMING_SNAKE_CASE : List[str] = outputs['''hidden_states''']
SCREAMING_SNAKE_CASE : List[Any] = outputs['''attentions''']
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
def check_decoder_attentions_output(lowerCamelCase_ :Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
self.assertEqual(out_len % 2 , 0 )
SCREAMING_SNAKE_CASE : int = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCamelCase_ :Optional[int] ):
SCREAMING_SNAKE_CASE : List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_decoder_attentions_output(lowerCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
SCREAMING_SNAKE_CASE : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 6, 7_68]
self.assertEqual(output.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 )
| 698 | 0 |
'''simple docstring'''
__magic_name__ : Dict ="Tobias Carryer"
from time import time
class LinearCongruentialGenerator :
"""simple docstring"""
    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number( self ) -> int:
        '''Advance the generator one step and return the new pseudo-random value.'''
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
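# Hedged note (assumption: not in the original file): a=1664525, c=1013904223 and
# m=2<<31 (=2**32) are the Numerical Recipes LCG constants; they satisfy the
# Hull-Dobell conditions, so the generator has full period m for any seed.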
| 664 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """bert"""
    def __init__( self :Any , vocab_size :List[Any]=3_05_22 , hidden_size :List[str]=7_68 , num_hidden_layers :Tuple=12 , num_attention_heads :List[Any]=12 , intermediate_size :int=30_72 , hidden_act :Dict="gelu" , hidden_dropout_prob :List[Any]=0.1 , attention_probs_dropout_prob :int=0.1 , max_position_embeddings :int=5_12 , type_vocab_size :Optional[int]=2 , initializer_range :int=0.0_2 , layer_norm_eps :Optional[int]=1E-12 , pad_token_id :Optional[Any]=0 , position_embedding_type :int="absolute" , use_cache :List[Any]=True , classifier_dropout :Optional[Any]=None , **kwargs :List[Any] , ) -> List[str]:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
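# Hedged usage sketch (assumptions: `tokenizer` and `model` are illustrative and do
# not appear in the original file):
# from pathlib import Path
# from transformers.onnx import export
# onnx_config = BertOnnxConfig(BertConfig())
# export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("bert.onnx"))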
| 698 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset ,expected_features ) -> List[str]:
    '''simple docstring'''
    assert isinstance(dataset ,Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' ,[False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory ,jsonl_path ,tmp_path ) -> List[str]:
    '''simple docstring'''
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ).read()
    _check_json_dataset(dataset ,expected_features )
@pytest.mark.parametrize(
'''features''' ,[
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = tmp_path / '''cache'''
lowerCamelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase__ = features.copy() if features else default_expected_features
lowerCamelCase__ = (
Features({feature: Value(a_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__ = JsonDatasetReader(a_ ,features=a_ ,cache_dir=a_ ).read()
_check_json_dataset(a_ ,a_ )
@pytest.mark.parametrize(
'''features''' ,[
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = tmp_path / '''cache'''
lowerCamelCase__ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
lowerCamelCase__ = features.copy() if features else default_expected_features
lowerCamelCase__ = (
Features({feature: Value(a_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__ = JsonDatasetReader(a_ ,features=a_ ,cache_dir=a_ ).read()
assert isinstance(a_ ,a_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
lowerCamelCase__ = features.copy()
lowerCamelCase__ = (
Features({feature: Value(a_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__ = tmp_path / '''cache'''
lowerCamelCase__ = JsonDatasetReader(a_ ,features=a_ ,cache_dir=a_ ).read()
assert isinstance(a_ ,a_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' ,[None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = tmp_path / '''cache'''
lowerCamelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase__ = JsonDatasetReader(a_ ,cache_dir=a_ ,split=a_ ).read()
_check_json_dataset(a_ ,a_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' ,[str, list] )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Any:
'''simple docstring'''
if issubclass(a_ ,a_ ):
lowerCamelCase__ = jsonl_path
elif issubclass(a_ ,a_ ):
lowerCamelCase__ = [jsonl_path]
lowerCamelCase__ = tmp_path / '''cache'''
lowerCamelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase__ = JsonDatasetReader(a_ ,cache_dir=a_ ).read()
_check_json_dataset(a_ ,a_ )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=("train",) ) -> Optional[int]:
'''simple docstring'''
assert isinstance(a_ ,a_ )
for split in splits:
lowerCamelCase__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' ,[False, True] )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ = tmp_path / '''cache'''
lowerCamelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__ = JsonDatasetReader({'''train''': jsonl_path} ,cache_dir=a_ ,keep_in_memory=a_ ).read()
_check_json_datasetdict(a_ ,a_ )
@pytest.mark.parametrize(
'''features''' ,[
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = tmp_path / '''cache'''
lowerCamelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase__ = features.copy() if features else default_expected_features
lowerCamelCase__ = (
Features({feature: Value(a_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__ = JsonDatasetReader({'''train''': jsonl_path} ,features=a_ ,cache_dir=a_ ).read()
_check_json_datasetdict(a_ ,a_ )
@pytest.mark.parametrize('''split''' ,[None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
if split:
lowerCamelCase__ = {split: jsonl_path}
else:
lowerCamelCase__ = '''train'''
lowerCamelCase__ = {'''train''': jsonl_path, '''test''': jsonl_path}
lowerCamelCase__ = tmp_path / '''cache'''
lowerCamelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase__ = JsonDatasetReader(a_ ,cache_dir=a_ ).read()
_check_json_datasetdict(a_ ,a_ ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
return json.load(a_ )
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
return [json.loads(a_ ) for line in buffer]
class __A :
'''simple docstring'''
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ ).write()
buffer.seek(0 )
lowerCamelCase__ = load_json_function(lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(exported_content[0] , lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ , orient=lowerCamelCase_ ).write()
buffer.seek(0 )
lowerCamelCase__ = load_json(lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCamelCase_ ) == 1_0
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
lowerCamelCase__ = load_json_function(lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(exported_content[0] , lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient_multiproc(self , orient , container , keys , len_at , dataset ):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content , container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
    def test_dataset_to_json_orient_invalidproc(self , dataset ):
        '''simple docstring'''
        with pytest.raises(ValueError ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset , buffer , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def test_dataset_to_json_compression(self , shared_datadir , tmp_path_factory , extension , compression , dataset ):
        '''simple docstring'''
        path = tmp_path_factory.mktemp('''data''' ) / F'test.json.{extension}'
        original_path = str(shared_datadir / F'test_file.json.{extension}' )
        JsonDatasetWriter(dataset , path , compression=compression ).write()
        with fsspec.open(path , '''rb''' , compression='''infer''' ) as f:
            exported_content = f.read()
        with fsspec.open(original_path , '''rb''' , compression='''infer''' ) as f:
            original_content = f.read()
assert exported_content == original_content
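# A minimal sketch of the two loader helpers the parametrized tests above rely on.
# Their real definitions sit earlier in this file; the bodies below are assumptions
# reconstructed from how the tests use them:
#
#     def load_json(buffer):
#         return json.load(buffer)
#
#     def load_json_lines(buffer):
#         return [json.loads(line) for line in buffer]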
| 481 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """luke"""
    def __init__( self , vocab_size=50_267 , entity_vocab_size=500_000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
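# Hedged usage sketch (LukeConfig is the class above; the values are illustrative):
#     config = LukeConfig(entity_vocab_size=10_000, entity_emb_size=128)
#     assert config.model_type == "luke"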
| 698 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig( PretrainedConfig ):
    model_type = """owlvit_text_model"""
    def __init__(self , vocab_size=49_408 , hidden_size=512 , intermediate_size=2_048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49_406 , eos_token_id=49_407 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig( PretrainedConfig ):
    model_type = """owlvit_vision_model"""
    def __init__(self , hidden_size=768 , intermediate_size=3_072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig( PretrainedConfig ):
    model_type = """owlvit"""
    is_composition = True
    def __init__(self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , return_dict=True , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the OwlViTVisionConfig with default values." )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
@classmethod
    def from_text_vision_configs(cls , text_config: Dict , vision_config: Dict , **kwargs ):
        config_dict = {}
        config_dict['''text_config'''] = text_config
        config_dict['''vision_config'''] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class OwlViTOnnxConfig( OnnxConfig ):
@property
    def inputs(self ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
    def outputs(self ):
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
    def atol_for_validation(self ):
return 1E-4
    def generate_dummy_inputs(self , processor: "ProcessorMixin" , batch_size: int = -1 , seq_length: int = -1 , framework: Optional["TensorType"] = None , ):
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
return {**text_input_dict, **image_input_dict}
@property
    def default_onnx_opset(self ):
        return 14
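# Hedged usage sketch: composing the full config from the two sub-configs via the
# from_text_vision_configs classmethod defined above (values are the defaults):
#     config = OwlViTConfig.from_text_vision_configs(
#         OwlViTTextConfig().to_dict(), OwlViTVisionConfig().to_dict()
#     )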
| 78 |
"""simple docstring"""
# Using DFS to find an Eulerian path or circuit traversal
def dfs(u , graph , visited_edge , path=None ):
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v , graph , visited_edge , path )
    return path
def check_circuit_or_path(graph , max_node ):
    '''simple docstring'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node ):
        if i not in graph.keys():
            continue
        if len(graph[i] ) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
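# Return codes from check_circuit_or_path: 1 -> no odd-degree vertices (an Euler
# circuit exists), 2 -> exactly two odd-degree vertices (an Euler path exists,
# starting at odd_node), 3 -> neither, so no Euler path or circuit.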
def check_euler(graph , max_node ):
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
    check, odd_node = check_circuit_or_path(graph , max_node )
    if check == 3:
        print('''graph is not Eulerian''' )
        print('''no path''' )
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('''graph has a Euler path''' )
    if check == 1:
        print('''graph has a Euler cycle''' )
    path = dfs(start_node , graph , visited_edge )
    print(path )
def main():
    '''simple docstring'''
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1 , max_node )
    check_euler(g2 , max_node )
    check_euler(g3 , max_node )
    check_euler(g4 , max_node )
    check_euler(g5 , max_node )
if __name__ == "__main__":
main()
| 698 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests ( PipelineLatentTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ) ->Union[str, Any]:
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) ->Any:
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_inference_batch_single_identical( self ) ->List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown( self ) ->List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : Any ) ->Optional[int]:
lowerCamelCase__ : List[str] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowerCamelCase__ : int = sag_pipe.to(lowerCamelCase_ )
sag_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = '''.'''
lowerCamelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = sag_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowerCamelCase ( self : Any ) ->Any:
lowerCamelCase__ : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase__ : Tuple = sag_pipe.to(lowerCamelCase_ )
sag_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[str] = '''.'''
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = sag_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' )
lowerCamelCase__ : Optional[int] = output.images
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowerCamelCase ( self : Any ) ->Optional[Any]:
lowerCamelCase__ : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase__ : Any = sag_pipe.to(lowerCamelCase_ )
sag_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = '''.'''
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase__ : Any = sag_pipe(
[prompt] , width=7_6_8 , height=5_1_2 , generator=lowerCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' , )
lowerCamelCase__ : str = output.images
assert image.shape == (1, 5_1_2, 7_6_8, 3)
| 315 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''<s>'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_54 )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
        '''simple docstring'''
        # fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : str = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    checkpoint_name = """facebook/mbart-large-50-one-to-many-mmt"""
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
    def setUpClass( cls ) -> str:
        '''simple docstring'''
        cls.tokenizer : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        cls.pad_token_id = 1
        return cls
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : int = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : List[Any] = targets['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
| 698 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cummulative prob of 5 highest values <= 0.6
] ,dtype=tf.floataa ,)
SCREAMING_SNAKE_CASE_ : Dict = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] ,dtype=tf.intaa ,) # expected non filtered idx as noted above
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] ,dtype=tf.floataa ,) # expected non filtered values as noted above
SCREAMING_SNAKE_CASE_ : List[str] = tf_top_k_top_p_filtering(lowerCamelCase_ ,top_k=10 ,top_p=0.6 ,min_tokens_to_keep=4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = output[output != -float('inf' )]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.cast(
tf.where(tf.not_equal(lowerCamelCase_ ,tf.constant(-float('inf' ) ,dtype=tf.floataa ) ) ) ,dtype=tf.intaa ,)
tf.debugging.assert_near(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1E-12 )
tf.debugging.assert_equal(lowerCamelCase_ ,lowerCamelCase_ )
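        # The two checks above confirm that tf_top_k_top_p_filtering with top_k=10,
        # top_p=0.6 and min_tokens_to_keep=4 keeps exactly the annotated (row, index)
        # pairs and their logits, mapping every other logit to -inf.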
@require_tf
class TFGenerationIntegrationTests( unittest.TestCase , GenerationIntegrationTestsMixin ):
if is_tf_available():
__a : List[str] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
    def test_generate_tf_function_export_fixed_input_length( self ):
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__( self , model ):
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) ,tf.int32 ,name='input_ids' ),
                    tf.TensorSpec((None, input_length) ,tf.int32 ,name='attention_mask' ),
                ) ,jit_compile=True ,)
            def serving( self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids ,attention_mask=attention_mask ,max_new_tokens=max_new_tokens ,return_dict_in_generate=True ,)
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model ,tmp_dir ,signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for batch_size in range(1 ,len(dummy_input_ids ) + 1 ):
                inputs = {
                    'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
                    'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs ,max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs ,tf_model_outputs )
@slow
    def test_generate_tf_function_export_fixed_batch_size( self ):
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__( self , model ):
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) ,tf.int32 ,name='input_ids' ),
                    tf.TensorSpec((batch_size, None) ,tf.int32 ,name='attention_mask' ),
                ) ,jit_compile=True ,)
            def serving( self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids ,attention_mask=attention_mask ,max_new_tokens=max_new_tokens ,return_dict_in_generate=True ,)
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model ,tmp_dir ,signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    'input_ids': tf.constant([dummy_input_ids[input_row]] ),
                    'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs ,max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs ,tf_model_outputs )
@slow
@require_tensorflow_text
def snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' ,filename='spiece.model' ,local_dir=lowerCamelCase_ )
            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
                def __init__( self ):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir ,'spiece.model' ) ,'rb' ).read() )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )

                def call( self , inputs , *args , **kwargs ):
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens ,max_seq_length=64 ,pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids ,attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) ,dtype=tf.string ,name='inputs' )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs ,outputs )
            keras_model.save(tmp_dir )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
SCREAMING_SNAKE_CASE_ : Optional[int] = 14
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
SCREAMING_SNAKE_CASE_ : str = '''Hello, my dog is cute and'''
SCREAMING_SNAKE_CASE_ : str = tokenizer(lowerCamelCase_ ,return_tensors='tf' )
SCREAMING_SNAKE_CASE_ : Dict = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
SCREAMING_SNAKE_CASE_ : int = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = model.generate(**lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [638, 198]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE_ : str = model.generate(**lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
SCREAMING_SNAKE_CASE_ : str = '''Hugging Face is a technology company based in New York and Paris.'''
SCREAMING_SNAKE_CASE_ : Optional[int] = bart_tokenizer(lowerCamelCase_ ,return_tensors='tf' ).input_ids
SCREAMING_SNAKE_CASE_ : Tuple = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
SCREAMING_SNAKE_CASE_ : List[Any] = bart_model.generate(lowerCamelCase_ ).numpy()
        class FakeBart(TFBartForConditionalGeneration ):
            def call( self , input_ids , foo=None , **kwargs ):
                return super().call(input_ids ,**kwargs )
SCREAMING_SNAKE_CASE_ : Optional[int] = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = bart_model.generate(lowerCamelCase_ ,foo='bar' ).numpy()
self.assertTrue(np.array_equal(lowerCamelCase_ ,lowerCamelCase_ ) )
        class FakeEncoder(bart_model.model.encoder.__class__ ):
            def call( self , input_ids , **kwargs ):
                return super().call(input_ids ,**kwargs )

        fake_encoder = FakeEncoder(bart_model.config ,bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE_ : Tuple = bart_model.generate(lowerCamelCase_ ).numpy()
with self.assertRaises(lowerCamelCase_ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(lowerCamelCase_ ,foo='bar' )
| 105 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest( TestCase ):
    '''simple docstring'''
    def _create_example_records( self ):
        '''simple docstring'''
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dict( self ):
        '''simple docstring'''
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data )
    def test_create( self ):
        '''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
    def test_list_dict_equivalent( self ):
        '''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def test_uneven_records( self ): # checks what happens with missing columns
        '''simple docstring'''
        uneven_records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(uneven_records )
        self.assertDictEqual(dset[0] , {'''col_1''': 1} )
        self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
    def test_variable_list_records( self ): # checks if the type can be inferred from the second record
        '''simple docstring'''
        list_records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(list_records )
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
    def test_create_empty( self ):
        '''simple docstring'''
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
| 698 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict ):
    '''simple docstring'''
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict , expert_idx=None ):
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0" , f'ffn.experts.expert_{expert_idx}' )
            else:
                key = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1." , ".ffn.fc1." )
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn." , ".cross_attention." )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm" , "ff_layer_norm" )
        new_dict[key] = state_dict[old_key]
    return new_dict
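# Illustrative renames produced by rename_fairseq_keys (not exhaustive):
#   "layers.0.moe_layer.experts.0.fc1.weight" -> "layers.0.ffn.experts.expert_3.fc1.weight"  (expert_idx=3)
#   "layers.0.moe_layer.gate.wg.weight"       -> "layers.0.ffn.router.classifier.weight"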
def shard_on_the_fly(switch_checkpoint_path , dump_path , num_experts , dtype , weights_name: str = WEIGHTS_NAME ):
    '''simple docstring'''
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f'-rank-{expert}.pt'
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["model"]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(".bin" , f'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
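# The returned index follows the Hugging Face sharded-checkpoint layout, roughly:
#   {"metadata": {"total_size": <bytes>},
#    "weight_map": {"<param name>": "pytorch_model-00001-of-00129.bin", ...}}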
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 275 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
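# The summed segment lengths approximate the arc-length integral
#     L = ∫[x_start, x_end] sqrt(1 + f'(x)^2) dx,
# and converge to it as `steps` grows for smooth f.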
if __name__ == "__main__":
    def f(x ):
        '''simple docstring'''
        return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
lowerCamelCase__ : str = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 698 | 0 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash :
    def __init__( self , data ) -> None:
        """simple docstring"""
        self.data = data
        self.h = [0x6745_2301, 0xefcd_ab89, 0x98ba_dcfe, 0x1032_5476, 0xc3d2_e1f0]
@staticmethod
    def rotate( n , b ) -> int:
        """simple docstring"""
        return ((n << b) | (n >> (32 - b))) & 0xffff_ffff
    def padding( self ) -> bytes:
        """simple docstring"""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
        return padded_data
    def split_blocks( self ) -> list:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
    def expand_block( self , block ) -> list:
        """simple docstring"""
        w = list(struct.unpack(">16L" , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w
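    # The loop above is the SHA-1 message schedule: words 16..79 are
    # w[i] = rotl1(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]).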
    def final_hash( self ) -> str:
        """simple docstring"""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a, b, c, d, e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5a82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ed9_eba1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8f1b_bcdc
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xca62_c1d6
                a, b, c, d, e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xffff_ffff,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xffff_ffff,
                self.h[1] + b & 0xffff_ffff,
                self.h[2] + c & 0xffff_ffff,
                self.h[3] + d & 0xffff_ffff,
                self.h[4] + e & 0xffff_ffff,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash():
    """Check this implementation against hashlib on a sample message."""
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    """Hash a string (or the contents of a file) given on the command line."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
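# Sanity check for the class above (a minimal sketch): the SHA-1 digest of
# b"abc" is the well-known FIPS 180 test vector.
if __name__ == "__main__":
    assert SHA1Hash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"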
| 147 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build (or register under `subparsers`) the parser for `accelerate test`."""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Run the bundled test script through accelerate-launch and report the result."""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
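# Usage sketch (not executed here): build the standalone parser and run the
# bundled test script against a config file; the path below is a hypothetical
# placeholder.
#
#     parser = test_command_parser()
#     args = parser.parse_args(["--config_file", "my_config.yaml"])
#     test_command(args)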
| 698 | 0 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )
    def read(self):
        # Build an iterable dataset when streaming
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written
    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
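# Usage sketch (not executed here): the public `datasets` API reaches these
# classes through Dataset.to_json / load_dataset("json", ...); the file name
# below is a hypothetical placeholder.
#
#     from datasets import Dataset, load_dataset
#     Dataset.from_dict({"a": [1, 2, 3]}).to_json("out.jsonl")   # JsonDatasetWriter path
#     ds = load_dataset("json", data_files="out.jsonl")          # JsonDatasetReader path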
| 657 |
"""simple docstring"""
def pick_bound(min_val: int = 10, max_val: int = 10_00, option: bool = True) -> int:
    """Return min_val when option is True, otherwise max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_val must be < max_val)")
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for to_guess inside (lower, higher), printing every guess."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument values for lower and higher must satisfy lower < higher")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        """Compare a candidate guess against the hidden value."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    """Read bounds and the hidden value from stdin, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
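# Non-interactive sketch of the search above: the midpoint halves the interval
# each round, so for bounds 1..1000 at most about log2(1000) ≈ 10 guesses are
# needed.
#
#     guess_the_number(1, 1000, 42)   # prints each midpoint, ending at 42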
| 698 | 0 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect (row_id, row_dict) pairs in the order the partitions would be read."""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
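# Sketch of the round-robin sharding the tests above assume: worker w of n
# workers reads partitions w, w+n, w+2n, ... (a hypothetical helper, not the
# library's actual implementation).
#
#     def expected_partitions(worker_id: int, num_workers: int, num_partitions: int) -> list[int]:
#         return list(range(worker_id, num_partitions, num_workers))
#
#     assert expected_partitions(0, 2, 4) == [0, 2]
#     assert expected_partitions(1, 2, 4) == [1, 3]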
| 76 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
lowerCamelCase__ : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build an untied linear layer whose weights are copied from an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 25_00_04
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
lowerCamelCase__ : Dict = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
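# Example invocation (a sketch; every path below is a hypothetical placeholder):
#
#     python convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py \
#         --checkpoint_path ./checkpoint_best.pt \
#         --dict_path ./dict.mbart50.txt \
#         --config_yaml_path ./config.yaml \
#         --pytorch_dump_folder_path ./dump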
| 698 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            results = benchmark.run()
            _check_summary_is_not_empty(results.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 479 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
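# Sketch of how the lazy module resolves on first attribute access (the symbol
# is a real public M2M-100 name; treat this as an illustrative snippet rather
# than a tested recipe):
#
#     from transformers import M2M100Config
#     config = M2M100Config()   # triggers the deferred import registered above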
| 698 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in value_array, find its nearest neighbour in dataset
    (by Euclidean distance) and return [nearest_vector, distance] pairs."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
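# Small demo of the helpers above (a minimal sketch with made-up data).
if __name__ == "__main__":
    sample_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    sample_values = np.array([[0.9, 1.1]])
    print(similarity_search(sample_dataset, sample_values))  # nearest is [1.0, 1.0]
    print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ≈ 0.7071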
| 278 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCamelCase__ : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCamelCase__ : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=5_00,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=10_24,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 698 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version string in one file, using the regex registered for it."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version: str):
    """Update the pinned version in every actively maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False):
    """Update the version everywhere it is hard-coded in the repo."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the `main` docs with stable links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
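# Worked example of the version-bump regexes above (pure string manipulation,
# safe to run standalone; the version numbers are made up):
#
#     re_pattern, replace = REPLACE_PATTERNS["init"]
#     src = '__version__ = "4.30.0.dev0"\n'
#     print(re_pattern.sub(replace.replace("VERSION", "4.30.0"), src))
#     # prints the line rewritten to version 4.30.0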
| 664 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : str = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Optional[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowerCamelCase__ : int = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
lowerCamelCase__ : Union[str, Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowerCamelCase__ : int = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase )
class lowercase__:
'''simple docstring'''
def __call__( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = None , **lowerCamelCase_ :Tuple , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE : List[str] = titles if texts is None else texts
return super().__call__(
lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = titles if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [titles]
SCREAMING_SNAKE_CASE : Dict = texts if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [texts]
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = questions if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [questions] * n_passages
        assert len(lowerCamelCase_ ) == len(
            lowerCamelCase_ ), f"There should be as many titles as texts but got {len(lowerCamelCase_ )} titles and {len(lowerCamelCase_ )} texts."
SCREAMING_SNAKE_CASE : Any = super().__call__(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : Dict = super().__call__(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase_ , lowerCamelCase_ )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE : int = attention_mask
return self.pad(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :BatchEncoding , lowerCamelCase_ :DPRReaderOutput , lowerCamelCase_ :int = 16 , lowerCamelCase_ :int = 64 , lowerCamelCase_ :int = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(range(lowerCamelCase_ ) , reverse=lowerCamelCase_ , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase_ , top_spans=lowerCamelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase_ , start_index=lowerCamelCase_ , end_index=lowerCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCamelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
for start_index, start_score in enumerate(lowerCamelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        SCREAMING_SNAKE_CASE : Dict = sorted(lowerCamelCase_ , key=lambda x: x[1] , reverse=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
SCREAMING_SNAKE_CASE : Optional[int] = end_index - start_index + 1
assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ["""input_ids""", """attention_mask"""]
UpperCamelCase = DPRReaderTokenizer
| 698 | 0 |
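A minimal sketch of how a DPR reader tokenizer like the mixin above is used end to end; it assumes the stock `transformers` DPR classes and checkpoint rather than the custom class defined here:

from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
# Build the (n_passages, sequence_length) matrix described in the docstring above.
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
# decode_best_spans sorts passages by relevance logit and extracts the top answer spans.
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)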
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __A ( _UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """lxmert"""
lowerCAmelCase_ = {}
def __init__( self , __lowerCAmelCase=3_0_5_2_2 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=9_5_0_0 , __lowerCAmelCase=1_6_0_0 , __lowerCAmelCase=4_0_0 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=9 , __lowerCAmelCase=5 , __lowerCAmelCase=5 , __lowerCAmelCase=2_0_4_8 , __lowerCAmelCase=4 , __lowerCAmelCase=6.67 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = num_qa_labels
lowerCamelCase__ = num_object_labels
lowerCamelCase__ = num_attr_labels
lowerCamelCase__ = l_layers
lowerCamelCase__ = x_layers
lowerCamelCase__ = r_layers
lowerCamelCase__ = visual_feat_dim
lowerCamelCase__ = visual_pos_dim
lowerCamelCase__ = visual_loss_normalizer
lowerCamelCase__ = task_matched
lowerCamelCase__ = task_mask_lm
lowerCamelCase__ = task_obj_predict
lowerCamelCase__ = task_qa
lowerCamelCase__ = visual_obj_loss
lowerCamelCase__ = visual_attr_loss
lowerCamelCase__ = visual_feat_loss
lowerCamelCase__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**lowerCamelCase_ )
| 481 |
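A hedged usage sketch for a config like the one above, via the public `LxmertConfig`; the layer counts shown are the defaults from the signature:

from transformers import LxmertConfig, LxmertModel

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
model = LxmertModel(config)  # randomly initialised from the configuration
# num_hidden_layers is the per-tower dict built in the constructor above.
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}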
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """markuplm"""
def __init__( self :int , lowerCamelCase_ :List[str]=3_05_22 , lowerCamelCase_ :Union[str, Any]=7_68 , lowerCamelCase_ :str=12 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :str=30_72 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Union[str, Any]=5_12 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Any=1E-12 , lowerCamelCase_ :Dict=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :str=2_56 , lowerCamelCase_ :List[Any]=10_24 , lowerCamelCase_ :Union[str, Any]=2_16 , lowerCamelCase_ :Dict=10_01 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :str=50 , lowerCamelCase_ :List[str]="absolute" , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :int=None , **lowerCamelCase_ :Dict , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : str = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE : Optional[Any] = max_depth
SCREAMING_SNAKE_CASE : Dict = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE : Tuple = tag_pad_id
SCREAMING_SNAKE_CASE : str = subs_pad_id
SCREAMING_SNAKE_CASE : List[Any] = xpath_unit_hidden_size
| 698 | 0 |
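The MarkupLM config above adds xpath-specific fields on top of a BERT-style layout; a short sketch using the public classes (values are the signature defaults):

from transformers import MarkupLMConfig, MarkupLMModel

config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
model = MarkupLMModel(config)  # randomly initialised
print(config.tag_pad_id, config.subs_pad_id)  # 216 1001: padding ids for the xpath embeddings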
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
class __A ( _UpperCAmelCase ):
def __init__(self : Tuple , __a : int , __a : int , __a : float , **__a : Tuple ):
UpperCAmelCase_ = feature_size
UpperCAmelCase_ = sampling_rate
UpperCAmelCase_ = padding_value
UpperCAmelCase_ = kwargs.pop("padding_side" , "right" )
UpperCAmelCase_ = kwargs.pop("return_attention_mask" , lowerCamelCase_ )
super().__init__(**lowerCamelCase_ )
def _lowercase (self : int , __a : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __a : Union[bool, str, PaddingStrategy] = True , __a : Optional[int] = None , __a : bool = False , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[Union[str, TensorType]] = None , ):
if isinstance(lowerCamelCase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase_ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
UpperCAmelCase_ = processed_features[self.model_input_names[0]]
UpperCAmelCase_ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase_ ) == 0:
if return_attention_mask:
UpperCAmelCase_ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ = required_input[0]
if isinstance(lowerCamelCase_ , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
UpperCAmelCase_ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase_ ):
UpperCAmelCase_ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase_ ):
UpperCAmelCase_ = '''tf'''
elif is_torch_tensor(lowerCamelCase_ ):
UpperCAmelCase_ = '''pt'''
elif isinstance(lowerCamelCase_ , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ = '''np'''
else:
raise ValueError(
f"""type of {first_element} unknown: {type(lowerCamelCase_ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase_ = to_numpy(lowerCamelCase_ )
else:
UpperCAmelCase_ = [to_numpy(lowerCamelCase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ = self._get_padding_strategies(padding=lowerCamelCase_ , max_length=lowerCamelCase_ )
UpperCAmelCase_ = processed_features[self.model_input_names[0]]
UpperCAmelCase_ = len(lowerCamelCase_ )
if not all(len(lowerCamelCase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase_ = []
for i in range(lowerCamelCase_ ):
UpperCAmelCase_ = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ = self._truncate(
lowerCamelCase_ , max_length=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , truncation=lowerCamelCase_ , )
truncated_inputs.append(lowerCamelCase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ = {}
for i in range(lowerCamelCase_ ):
# padding
UpperCAmelCase_ = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ = []
                if value.dtype is np.dtype(np.float64 ):
                    UpperCAmelCase_ = value.astype(np.float32 )
batch_outputs[key].append(lowerCamelCase_ )
return BatchFeature(lowerCamelCase_ , tensor_type=lowerCamelCase_ )
def _lowercase (self : List[Any] , __a : Union[Dict[str, np.ndarray], BatchFeature] , __a : Optional[int] = None , __a : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __a : Optional[int] = None , __a : Optional[bool] = None , ):
UpperCAmelCase_ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ = len(lowerCamelCase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            UpperCAmelCase_ = np.ones(len(lowerCamelCase_ ) , dtype=np.int32 )
if needs_to_be_padded:
UpperCAmelCase_ = max_length - len(lowerCamelCase_ )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ = np.pad(
processed_features["attention_mask"] , (0, difference) )
UpperCAmelCase_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ = np.pad(
lowerCamelCase_ , lowerCamelCase_ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ = np.pad(
processed_features["attention_mask"] , (difference, 0) )
UpperCAmelCase_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ = np.pad(
lowerCamelCase_ , lowerCamelCase_ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def _lowercase (self : Optional[int] , __a : Union[Dict[str, np.ndarray], BatchFeature] , __a : Optional[int] = None , __a : Optional[int] = None , __a : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ = len(lowerCamelCase_ ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ = processed_features['''attention_mask'''][:max_length]
return processed_features
def _lowercase (self : str , __a : List[str]=False , __a : int=None ):
if padding is not False:
if padding is True:
UpperCAmelCase_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase_ = PaddingStrategy(lowerCamelCase_ )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase_ = padding
else:
UpperCAmelCase_ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
| 78 |
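The pad/truncate logic above is exercised through concrete subclasses; a sketch using `Wav2Vec2FeatureExtractor`, one such `SequenceFeatureExtractor` (shapes are illustrative):

import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
raw_speech = [np.zeros(800, dtype=np.float32), np.zeros(1_200, dtype=np.float32)]
batch = extractor(
    raw_speech, sampling_rate=16_000, padding="longest", return_attention_mask=True, return_tensors="np"
)
print(batch["input_values"].shape)           # (2, 1200): padded to the longest item
print(batch["attention_mask"].sum(axis=-1))  # [ 800 1200]: the true lengths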
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """resnet"""
UpperCamelCase = ["""basic""", """bottleneck"""]
def __init__( self :Optional[int] , lowerCamelCase_ :Tuple=3 , lowerCamelCase_ :Tuple=64 , lowerCamelCase_ :Union[str, Any]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase_ :int=[3, 4, 6, 3] , lowerCamelCase_ :Any="bottleneck" , lowerCamelCase_ :Optional[int]="relu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=None , lowerCamelCase_ :Optional[int]=None , **lowerCamelCase_ :Optional[int] , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : List[Any] = layer_type
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = downsample_in_first_stage
SCREAMING_SNAKE_CASE : int = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self :str ) -> float:
'''simple docstring'''
return 1E-3
| 698 | 0 |
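A sketch of consuming the backbone-aware config above through the public API; `out_features` must use the stage names built in the constructor ("stem", "stage1", ...):

from transformers import ResNetBackbone, ResNetConfig

config = ResNetConfig(out_features=["stage2", "stage4"])
backbone = ResNetBackbone(config)  # randomly initialised
print(config.out_features, config.out_indices)  # ['stage2', 'stage4'] [2, 4]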
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array` (brute-force search)."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 |
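Two quick checks for the brute-force search above (it looks for non-contiguous, non-decreasing runs):

# Assumes the longest_subsequence function defined above.
print(longest_subsequence([1, 9, 2, 5, 7]))  # [1, 2, 5, 7]
print(longest_subsequence([4, 4, 4]))        # [4, 4, 4] (already non-decreasing)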
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """mra"""
def __init__( self :int , lowerCamelCase_ :Optional[int]=5_02_65 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :int=1E-5 , lowerCamelCase_ :List[Any]="absolute" , lowerCamelCase_ :str=4 , lowerCamelCase_ :List[str]="full" , lowerCamelCase_ :List[Any]=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :List[Any]=2 , **lowerCamelCase_ :str , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = block_per_row
SCREAMING_SNAKE_CASE : Optional[int] = approx_mode
SCREAMING_SNAKE_CASE : List[Any] = initial_prior_first_n_blocks
SCREAMING_SNAKE_CASE : Union[str, Any] = initial_prior_diagonal_n_blocks
| 698 | 0 |
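A brief sketch with the public `MraConfig`; `block_per_row` and `approx_mode` are the attention-approximation knobs stored by the constructor above:

from transformers import MraConfig, MraModel

config = MraConfig(block_per_row=4, approx_mode="full")
model = MraModel(config)  # randomly initialised
print(config.initial_prior_first_n_blocks, config.initial_prior_diagonal_n_blocks)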
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_ : Union[tf.Tensor, np.ndarray] ) -> List[int]:
"""simple docstring"""
if isinstance(a_ , np.ndarray ):
return list(tensor.shape )
SCREAMING_SNAKE_CASE_ : Tuple = tf.shape(a_ )
if tensor.shape == tf.TensorShape(a_ ):
return dynamic
SCREAMING_SNAKE_CASE_ : Tuple = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(a_ )]
def __UpperCAmelCase ( lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[str] = None ) -> tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=a_ , name=a_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str]=1E-5 , lowerCamelCase_ : List[Any]=-1 ) -> Tuple:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(a_ , a_ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
SCREAMING_SNAKE_CASE_ : str = tf.nn.moments(a_ , axes=[axis] , keepdims=a_ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
SCREAMING_SNAKE_CASE_ : int = [1] * inputs.shape.rank
SCREAMING_SNAKE_CASE_ : Any = shape_list(a_ )[axis]
SCREAMING_SNAKE_CASE_ : List[str] = tf.reshape(a_ , a_ )
SCREAMING_SNAKE_CASE_ : List[Any] = tf.reshape(a_ , a_ )
# Compute layer normalization using the batch_normalization
# function.
SCREAMING_SNAKE_CASE_ : List[Any] = tf.nn.batch_normalization(
a_ , a_ , a_ , offset=a_ , scale=a_ , variance_epsilon=a_ , )
return outputs
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int=0 , lowerCamelCase_ : Tuple=-1 ) -> List[Any]:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
SCREAMING_SNAKE_CASE_ : List[Any] = tf.shape(a_ )
SCREAMING_SNAKE_CASE_ : Tuple = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
SCREAMING_SNAKE_CASE_ : List[Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(a_ , a_ )
def __UpperCAmelCase ( lowerCamelCase_ : tf.Tensor ) -> tf.Tensor:
"""simple docstring"""
if not isinstance(a_ , tf.Tensor ):
SCREAMING_SNAKE_CASE_ : int = tf.convert_to_tensor(a_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
SCREAMING_SNAKE_CASE_ : str = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
SCREAMING_SNAKE_CASE_ : str = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
SCREAMING_SNAKE_CASE_ : int = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __UpperCAmelCase ( lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : int , lowerCamelCase_ : str = "input_ids" ) -> None:
"""simple docstring"""
tf.debugging.assert_less(
a_ , tf.cast(a_ , dtype=tensor.dtype ) , message=(
F'The maximum value of {tensor_name} ({tf.math.reduce_max(a_ )}) must be smaller than the embedding '
F'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
) , )
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
SCREAMING_SNAKE_CASE_ : Dict = [x for x in data if len(a_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
F'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
F'bytes: {bad_attributes}' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.asarray(a_ )
SCREAMING_SNAKE_CASE_ : List[str] = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array_split(a_ , a_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
SCREAMING_SNAKE_CASE_ : Tuple = np.array_split(a_ , a_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(a_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = chunk_data
else:
SCREAMING_SNAKE_CASE_ : List[str] = data
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
if name in group.attrs:
SCREAMING_SNAKE_CASE_ : List[str] = [n.decode('utf8' ) if hasattr(a_ , 'decode' ) else n for n in group.attrs[name]]
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(a_ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
def _expand_single_ad_tensor(lowerCamelCase_ : Dict ):
if isinstance(a_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(a_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , a_ )
| 105 |
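Two of the helpers above are available under `transformers.tf_utils`; a short sketch of `shape_list` and `stable_softmax` (tensor values are illustrative):

import tensorflow as tf
from transformers.tf_utils import shape_list, stable_softmax

x = tf.random.uniform((2, 5))
print(shape_list(x))                  # [2, 5]: static dims where known, dynamic otherwise
probs = stable_softmax(x, axis=-1)    # softmax(logits + 1e-9), stable across backends
print(tf.reduce_sum(probs, axis=-1))  # ~1.0 per row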
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """nllb-moe"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :List[str] , lowerCamelCase_ :Optional[int]=12_81_12 , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :Any=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :Union[str, Any]=0.0_5 , lowerCamelCase_ :Optional[int]=0.0_5 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :Tuple="relu" , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Any=False , lowerCamelCase_ :Optional[Any]="float32" , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[Any]=1_28 , lowerCamelCase_ :Any=64 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :Union[str, Any]=0.0_0_1 , lowerCamelCase_ :Optional[int]=0.0_0_1 , lowerCamelCase_ :List[str]="all" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=False , lowerCamelCase_ :Tuple=1.0 , lowerCamelCase_ :Union[str, Any]=0.2 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :int=2 , lowerCamelCase_ :List[str]=False , **lowerCamelCase_ :int , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : str = d_model
SCREAMING_SNAKE_CASE : Optional[int] = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Any = encoder_layers
SCREAMING_SNAKE_CASE : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : List[str] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Any = activation_function
SCREAMING_SNAKE_CASE : Tuple = init_std
SCREAMING_SNAKE_CASE : str = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE : int = router_z_loss_coef
SCREAMING_SNAKE_CASE : Any = router_aux_loss_coef
SCREAMING_SNAKE_CASE : str = decoder_sparse_step
SCREAMING_SNAKE_CASE : str = encoder_sparse_step
SCREAMING_SNAKE_CASE : List[str] = num_experts
SCREAMING_SNAKE_CASE : Union[str, Any] = expert_capacity
SCREAMING_SNAKE_CASE : Tuple = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = router_dtype
SCREAMING_SNAKE_CASE : Union[str, Any] = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : int = batch_prioritized_routing
SCREAMING_SNAKE_CASE : Optional[int] = second_expert_policy
SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE : Any = moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE : Optional[Any] = moe_token_dropout
SCREAMING_SNAKE_CASE : Tuple = output_router_logits
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
| 698 | 0 |
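A sketch of the validation path in the config above; constructing it with an unsupported `router_dtype` raises the `ValueError` shown in the constructor:

from transformers import NllbMoeConfig

config = NllbMoeConfig(num_experts=64, router_dtype="float32")  # fine
try:
    NllbMoeConfig(router_dtype="float8")  # not one of float32/float16/bfloat16
except ValueError as err:
    print(err)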
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    '''Doolittle LU decomposition: factor a square matrix into lower @ upper.'''
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275 |
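A quick numerical check of the decomposition above: the product of the two factors should reconstruct the input (this matrix has no zero pivots, so no pivoting is needed):

import numpy as np

# Assumes the lower_upper_decomposition function defined above.
matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
print(np.allclose(lower @ upper, matrix))  # True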
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase__ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
lowerCamelCase__ : Optional[Any] = "CompVis/stable-diffusion-v1-2"
lowerCamelCase__ : Dict = "CompVis/stable-diffusion-v1-3"
lowerCamelCase__ : List[str] = "CompVis/stable-diffusion-v1-4"
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :AutoencoderKL , lowerCamelCase_ :CLIPTextModel , lowerCamelCase_ :CLIPTokenizer , lowerCamelCase_ :UNetaDConditionModel , lowerCamelCase_ :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase_ :StableDiffusionSafetyChecker , lowerCamelCase_ :CLIPImageProcessor , lowerCamelCase_ :bool = True , ) -> List[str]:
'''simple docstring'''
        super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , requires_safety_checker=lowerCamelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self :Dict ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , lowerCamelCase_ ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[Union[str, int]] = "auto" ) -> Tuple:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase_ )
@torch.no_grad()
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[str] , ) -> Tuple:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Tuple , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Dict , ) -> List[str]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[Any] , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Optional[Any] , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(lowerCamelCase_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Tuple = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : Union[str, Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 698 | 0 |
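A hedged driver for a comparison pipeline like the one above; the `custom_pipeline` id mirrors the diffusers community example this class resembles (an assumption, as is the prompt), and the checkpoint downloads are large:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",  # assumption: community pipeline id
    torch_dtype=torch.float16,
).to("cuda")
pipe.enable_attention_slicing()
result = pipe(prompt="an astronaut riding a horse on mars", num_inference_steps=25)
for idx, image in enumerate(result.images):  # one image per v1.1-v1.4 checkpoint
    image.save(f"sd_v1_{idx + 1}.png")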
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase_ ( _UpperCAmelCase ):
__lowercase : Tuple = 42
__lowercase : str = 42
def __init__( self , lowerCamelCase_ , lowerCamelCase_ ) -> int:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self , lowerCamelCase_ = 1 , lowerCamelCase_ = 20_00 , lowerCamelCase_ = None , lowerCamelCase_ = "pil" , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
_UpperCamelCase = self.unet.config.sample_size
_UpperCamelCase = (batch_size, 3, img_size, img_size)
_UpperCamelCase = self.unet
_UpperCamelCase = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
_UpperCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
_UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
_UpperCamelCase = self.scheduler.step_correct(lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# prediction step
_UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_ ).sample
_UpperCamelCase = self.scheduler.step_pred(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
_UpperCamelCase = sample_mean.clamp(0 , 1 )
_UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
| 147 |
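Running an SDE-VE pipeline like the one above through its public diffusers counterpart; "google/ncsnpp-church-256" is one checkpoint known to pair with it, and the step count is reduced from the 2000-step default for speed:

from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=100).images[0]
image.save("sde_ve_sample.png")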
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    '''Recursive bubble sort: bubble the largest element to the end, then recurse on the prefix.'''
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 698 | 0 |
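Quick checks for the recursive bubble sort above (it sorts in place and returns the list):

# Assumes the bubble_sort function defined above.
print(bubble_sort([0, 5, 2, 3, 2]))  # [0, 2, 2, 3, 5]
print(bubble_sort([]))               # []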
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Dict:
_UpperCAmelCase = '''hf-internal-testing/tiny-random-t5'''
_UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
_UpperCAmelCase = tokenizer("This is me" , return_tensors="pt" )
_UpperCAmelCase = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_UpperCAmelCase = model.generate(**lowerCamelCase_ )
_UpperCAmelCase = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_UpperCAmelCase = model_reloaded.generate(**lowerCamelCase_ )
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = '''hf-internal-testing/tiny-random-t5'''
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
_UpperCAmelCase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowerCamelCase_ ):
model.save_pretrained(lowerCamelCase_ )
_UpperCAmelCase = model.reverse_bettertransformer()
model.save_pretrained(lowerCamelCase_ )
| 657 |
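The round trip exercised by the tests above, in plain form; it assumes `optimum` is installed, since `to_bettertransformer` delegates to it:

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()       # swap attention blocks for fused kernels
# ... run inference ...
model = model.reverse_bettertransformer()  # restore the canonical layout before saving
model.save_pretrained("tiny-t5-roundtrip")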
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :List[str] , lowerCamelCase_ :UNetaDModel , lowerCamelCase_ :ScoreSdeVeScheduler ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self :int , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 20_00 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , **lowerCamelCase_ :Union[str, Any] , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.sample_size
SCREAMING_SNAKE_CASE : List[str] = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE : Any = self.unet
SCREAMING_SNAKE_CASE : Dict = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step_correct(lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step_pred(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE : List[str] = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
| 698 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
@staticmethod
def _lowerCamelCase ( *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]:
pass
def __UpperCAmelCase ( __UpperCamelCase ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a_ = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
UpperCamelCase =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Any:
__lowercase : Tuple = pipeline(
'''document-question-answering''' , model=lowerCamelCase_ , tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
__lowercase : List[str] = INVOICE_URL
__lowercase : Any = list(zip(*apply_tesseract(load_image(lowerCamelCase_ ) , lowerCamelCase_ , '''''' ) ) )
__lowercase : Optional[int] = '''What is the placebo?'''
__lowercase : int = [
{
'''image''': load_image(lowerCamelCase_ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
__lowercase : Any = dqa_pipeline(lowerCamelCase_ , top_k=2 )
self.assertEqual(
lowerCamelCase_ , [
[
{'''score''': ANY(lowerCamelCase_ ), '''answer''': ANY(lowerCamelCase_ ), '''start''': ANY(lowerCamelCase_ ), '''end''': ANY(lowerCamelCase_ )},
{'''score''': ANY(lowerCamelCase_ ), '''answer''': ANY(lowerCamelCase_ ), '''start''': ANY(lowerCamelCase_ ), '''end''': ANY(lowerCamelCase_ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # No text is detected in this image, so LayoutLMv2 should return an empty answer.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
__lowercase : List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__lowercase : int = INVOICE_URL
__lowercase : str = '''What is the invoice number?'''
__lowercase : List[str] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
__lowercase : Tuple = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__lowercase : Any = INVOICE_URL
__lowercase : Optional[Any] = '''What is the invoice number?'''
__lowercase : Union[str, Any] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : Optional[Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
__lowercase : Optional[int] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCamelCase_ )
__lowercase : List[str] = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCamelCase_ , revision='''3dc6de3''' , )
__lowercase : Optional[Any] = INVOICE_URL
__lowercase : Any = '''What is the invoice number?'''
__lowercase : Dict = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowercase : Optional[int] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowercase : int = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__lowercase : List[str] = list(zip(*apply_tesseract(load_image(lowerCamelCase_ ) , lowerCamelCase_ , '''''' ) ) )
# This model should also work if `image` is set to None
__lowercase : List[str] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
__lowercase : List[str] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCamelCase_ )
__lowercase : Any = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCamelCase_ , revision='''3dc6de3''' , max_seq_len=50 , )
__lowercase : Any = INVOICE_URL
__lowercase : str = '''What is the invoice number?'''
__lowercase : Optional[int] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : Tuple = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__lowercase : Any = list(zip(*apply_tesseract(load_image(lowerCamelCase_ ) , lowerCamelCase_ , '''''' ) ) )
# This model should also work if `image` is set to None
__lowercase : Tuple = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
    def test_large_model_pt_donut(self):
__lowercase : Dict = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__lowercase : List[Any] = INVOICE_URL
__lowercase : str = '''What is the invoice number?'''
__lowercase : int = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
pass
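# --- Hedged usage sketch (illustrative; needs network access and model weights) ---
# A minimal end-to-end call mirroring what the tests above exercise:
#
#     from transformers import pipeline
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2))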
| 76 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Apply NOT gates to two qubits, measure them, and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
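    # With X applied to both qubits, measurement is deterministic on the noise-free
    # simulator, so all 1000 shots should land on the '11' outcome:
    assert counts == {"11": 1000}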
| 698 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_a : str = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = ["BeitFeatureExtractor"]
_a : Optional[Any] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
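# --- Hedged usage note (illustrative) ---
# With the _LazyModule indirection above, heavy submodules are only imported on
# first attribute access, so e.g.:
#
#     from transformers import BeitConfig   # cheap: pulls in the config module only
#     config = BeitConfig()                 # no torch/flax modeling code is loaded here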
| 479 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests were collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
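# --- Hedged doctest example (illustrative) ---
# The custom checker above lets a doctest opt out of output comparison:
#
#     >>> print(time.time())  # doctest: +IGNORE_RESULT
#     1690000000.0
#
# passes regardless of the printed value, because check_output() short-circuits
# to True whenever the IGNORE_RESULT flag is set.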
| 698 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)
@require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)
@require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
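# --- Hedged illustration (the function name below is a placeholder) ---
# debug_launcher spawns `num_processes` CPU workers for a callable, which is how
# the CPU tests above simulate a distributed run without GPUs:
#
#     from accelerate import debug_launcher
#     debug_launcher(my_training_fn, num_processes=2)  # my_training_fn is hypothetical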
| 278 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        # NOTE: the tester deliberately ignores the constructor arguments and
        # overrides them with hard-coded values below.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
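# --- Hedged usage sketch (requires TensorFlow and network access) ---
#
#     import tensorflow as tf
#     from transformers import TFConvBertModel
#     model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#     out = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]   # shape (1, 6, 768), as asserted above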
| 698 | 0 |
'''simple docstring'''
import numpy as np
class Cell:
    """A single grid cell with A* bookkeeping (position, parent, g/h/f costs)."""
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__(self, cell) -> bool:
        return self.position == cell.position
    def showcell(self):
        print(self.position)
class Gridworld:
    """A rectangular world of cells, stored as a numpy array."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show(self):
        print(self.w)
    def get_neigbours(self, cell):
        """Return the valid neighbouring cells of `cell` inside the world."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """Best-first search from `start` to `goal` on `world`, returning the path."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
print(world.w)
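# --- Hedged note on the heuristic ---
# n.h above is the *squared* Euclidean distance, which can overestimate the true
# remaining cost and is therefore not admissible; on this 8-connected grid the
# Chebyshev distance max(abs(x2 - x1), abs(y2 - y1)) would be an admissible
# alternative that guarantees a shortest path.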
| 664 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
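# --- Hedged usage sketch (illustrative) ---
# Instantiating the config and inspecting the ONNX input axes defined above:
#
#     config = BertConfig(vocab_size=30522, num_hidden_layers=6)
#     onnx_config = BertOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)  # OrderedDict of dynamic batch/sequence axes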
| 698 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)
@require_torch
@slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
@require_torch
@slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
@require_tf
@slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
@require_torch
@slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameters should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with other args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should keep exactly one arg (everything before the missing "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
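# --- Hedged usage sketch (mirrors the helpers exercised above; paths are illustrative) ---
#
#     from pathlib import Path
#     from transformers.convert_graph_to_onnx import convert
#     convert("pt", "bert-base-cased", Path("/tmp/bert.onnx"), 12, None)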
| 481 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"
    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 698 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 78 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that records the Eulerian path."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Return (1, _) for an Euler circuit, (2, odd_node) for an Euler path, (3, _) otherwise."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Classify the graph and, when possible, print an Eulerian traversal."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
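# --- Hedged walk-through ---
# For g1 above, vertices 1 and 5 have odd degree, so check_euler reports a Euler
# path; dfs then starts from the odd node 5 and, with this adjacency order,
# yields [5, 4, 1, 2, 3, 1], covering each of the five edges exactly once.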
| 698 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the ONNX export."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-exportable version of the block-length computation used by GPT-Neo attention."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
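

# Minimal sanity sketch, assuming PyTorch is installed (the helper name below is
# just for illustration): `custom_unfold` exists to mirror `torch.Tensor.unfold`
# in an ONNX-exportable way, so the two should agree element-wise.
def _unfold_sanity_check():
    import torch

    x = torch.arange(24.0).reshape(2, 12)
    assert torch.equal(custom_unfold(x, dimension=1, size=4, step=4), x.unfold(1, 4, 4))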
| 315 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCamelCase__ : List[str] = 250004
lowerCamelCase__ : str = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self) -> None:
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self) -> None:
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self) -> None:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls) -> None:
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self) -> None:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self) -> None:
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self) -> None:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffected_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self) -> None:
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self) -> None:
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self) -> None:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
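
    # Illustrative sketch only (would download the real checkpoint, so kept as a
    # comment): the assertions above rely on MBart-50's convention of prefixing
    # the source language code and suffixing </s> (id 2), e.g.:
    #
    #   tok = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
    #   ids = tok("UN Chief Says There Is No Military Solution in Syria").input_ids
    #   assert ids[0] == EN_CODE and ids[-1] == 2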
| 698 | 0 |
import datasets
_CITATION = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"

_DESCRIPTION = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"

_KWARGS_DESCRIPTION = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 699 |
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
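

# Worked examples, sketched for illustration: a negative factor can turn the
# running minimum product into the new maximum, which is why both extremes are
# tracked in the loop above.
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
    print(max_product_subarray([-2, 0, -1]))  # 0
    print(max_product_subarray([-4, -3, -2]))  # 12, from the subarray [-4, -3]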
| 699 | 1 |
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 699 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 1 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
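

# Quick sanity sketch (the helper name below is just for illustration):
# `_modexpt` should agree with Python's built-in three-argument `pow`.
def _modexpt_check():
    assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)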
| 699 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True,
        use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
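

# Minimal usage sketch, assuming `transformers` is importable (the helper name is
# illustrative only): configs are plain hyper-parameter containers, so they
# round-trip through `to_dict()`.
def _config_roundtrip_example():
    config = BioGptConfig(num_hidden_layers=2, hidden_size=64)
    assert BioGptConfig(**config.to_dict()).hidden_size == 64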
| 699 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu",
        initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0,
        dropout_rate=0.0, **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 699 |
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
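

# Tiny worked check, sketched for illustration: with dimensions [10, 20, 30] there
# is only one product, A1(10x20) @ A2(20x30), so the DP table must report
# 10 * 20 * 30 = 6000 scalar multiplications.
def _matrix_chain_example():
    matrix, _ = matrix_chain_order([10, 20, 30])
    assert matrix[1][2] == 6000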
| 699 | 1 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping tensors where necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
# convert pytorch tensor to numpy
A = {k: v.numpy() for k, v in pt_state_dict.items()}
A = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
A = flax_model.params['params']
else:
A = flax_model.params
A = flatten_dict(lowerCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(lowerCAmelCase )
A = {}
A = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
A = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
A = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A = pt_tuple_key[1:]
# Correctly rename weight parameters
A , A = rename_key_and_reshape_tensor(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# add model prefix if necessary
A = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
A = jnp.asarray(lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase, lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
A = jnp.asarray(lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
A = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
import torch
# Load the index
A = {}
for shard_file in shard_filenames:
# load using msgpack utils
A = torch.load(lowerCAmelCase )
A = {k: v.numpy() for k, v in pt_state_dict.items()}
A = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A = flax_model.params['params']
A = flatten_dict(lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
A = flax_model.params
A = flatten_dict(lowerCAmelCase )
A = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
A = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
A = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A = pt_tuple_key[1:]
# Correctly rename weight parameters
A , A = rename_key_and_reshape_tensor(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# add model prefix if necessary
A = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
A = jnp.asarray(lowerCAmelCase )
continue
if "var" in flax_key[-1]:
A = jnp.asarray(lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase, lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
A = jnp.asarray(lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
A = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
A = os.path.abspath(lowerCAmelCase )
logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
A = getattr(lowerCAmelCase, 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(lowerCAmelCase, 'rb' ) as state_f:
try:
A = from_bytes(lowerCAmelCase, state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(lowerCAmelCase, lowerCAmelCase )
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
A = pt_model.state_dict()
A = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
A = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
A = []
A = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A = flax_key_tuple[0] == pt_model.base_model_prefix
A = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
A = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
A = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(lowerCAmelCase ) not in pt_model_dict:
# conv layer
A = flax_key_tuple[:-1] + ('weight',)
A = jnp.transpose(lowerCAmelCase, (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCAmelCase ) not in pt_model_dict:
# linear layer
A = flax_key_tuple[:-1] + ('weight',)
A = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
A = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
A = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
A = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
A = '.'.join(lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
A = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
A = key.split('.' )
A = None
if key_components[-3::2] == ["parametrizations", "original0"]:
A = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
A = key_components[-2] + '_v'
if name is not None:
A = key_components[:-3] + [name]
A = '.'.join(lowerCAmelCase )
A = key
if flax_key in special_pt_names:
A = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
A = np.asarray(lowerCAmelCase ) if not isinstance(lowerCAmelCase, np.ndarray ) else flax_tensor
A = torch.from_numpy(lowerCAmelCase )
# remove from missing keys
missing_keys.remove(lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowerCAmelCase )
pt_model.load_state_dict(lowerCAmelCase )
# re-transform missing_keys to list
A = list(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(lowerCAmelCase ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'If your task is similar to the task the model of the checkpoint was trained on, '
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
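

# Illustrative sketch of the convention the renaming logic above relies on (the
# helper name is made up for this example): PyTorch stores `nn.Linear` weights as
# (out_features, in_features), while the Flax "kernel" is (in_features, out_features),
# hence the transpose in `rename_key_and_reshape_tensor`.
def _linear_kernel_convention_example():
    pt_weight = np.zeros((8, 4))  # torch.nn.Linear(4, 8).weight, as a numpy array
    flax_kernel = pt_weight.T  # the corresponding flax.linen.Dense(8) kernel
    assert flax_kernel.shape == (4, 8)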
| 699 |
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
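

# Short numerical sketch (the helper name is just for illustration): the
# candidates generated above are exactly the cube differences
# (k + 1)^3 - k^3 = 3k^2 + 3k + 1, i.e. 7, 19, 37, 61, 91, ...
def _cuban_prime_check():
    assert solution(100) == 4  # 7, 19, 37 and 61 are the cuban primes below 100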
| 699 | 1 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of decoding method."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Optional[int]=("UpDecoderBlock2D",) , UpperCamelCase__ : int=(64,) , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Union[str, Any]="silu" , UpperCamelCase__ : str="group" , ):
super().__init__()
A = layers_per_block
A = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A = list(reversed(UpperCamelCase__ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(UpperCamelCase__ ) - 1
A = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A = False
def UpperCamelCase ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple=None ):
A = z
A = self.conv_in(UpperCamelCase__ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : int ):
def custom_forward(*UpperCamelCase__ : str ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(UpperCamelCase__ )
else:
A = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A = self.conv_act(UpperCamelCase__ )
A = self.conv_out(UpperCamelCase__ )
return sample
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : str=True ):
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A = n_e
A = sane_index_shape
def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
A = inds.shape
assert len(UpperCamelCase__ ) > 1
A = inds.reshape(ishape[0] , -1 )
A = self.used.to(UpperCamelCase__ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Tuple ):
A = inds.shape
assert len(UpperCamelCase__ ) > 1
A = inds.reshape(ishape[0] , -1 )
A = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Union[str, Any] ):
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 , 2 , 3 , 1 ).contiguous()
A = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A = self.embedding(UpperCamelCase__ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A = self.remap_to_used(UpperCamelCase__ )
A = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] , -1 ) # add batch axis
A = self.unmap_to_all(UpperCamelCase__ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(UpperCamelCase__ )
if shape is not None:
A = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
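

# Small illustrative sketch (the helper name is made up): the posterior class
# above implements the reparameterization trick, sample = mean + std * eps, over
# a tensor whose channel axis holds the [mean, logvar] halves.
def _posterior_example():
    moments = torch.randn(1, 8, 4, 4)  # e.g. the output of an encoder's conv_out
    posterior = DiagonalGaussianDistribution(moments)
    assert posterior.sample().shape == (1, 4, 4, 4)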
| 699 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 699 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train and eval `DataLoader`s for GLUE MRPC, tokenized with "bert-base-cased"."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
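# Hedged usage note (not part of the original script): typical invocations, assuming
# `accelerate` has been configured on the machine (`accelerate config`). The flag names
# match the argparse setup above; the script file name is illustrative, and which
# tracker backends get picked up depends on what is installed in the environment.
#
#     python complete_tracking.py --with_tracking
#     accelerate launch complete_tracking.py --with_tracking --project_dir runs/mrpc
#     accelerate launch --mixed_precision fp16 complete_tracking.py --with_tracking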
| 699 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
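# Hedged usage sketch (not part of the original module): the feed-forward pieces above
# depend only on torch, so a standalone shape check (dimensions illustrative) would be:
#
#     import torch
#
#     ff = FeedForward(dim=32, mult=4, activation_fn="geglu")
#     out = ff(torch.randn(2, 16, 32))  # GEGLU projects to 2 * inner_dim, gates, projects back
#     assert out.shape == (2, 16, 32)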
| 699 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
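# Hedged illustration (not part of the original script): torch's MultiheadAttention
# stacks the query/key/value projections along dim 0 of `in_proj_weight`, so with a
# hidden size of 256 the three slices above recover q, k and v. A standalone check
# with made-up tensors:
#
#     import torch
#
#     in_proj_weight = torch.randn(3 * 256, 256)
#     q, k, v = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[-256:]
#     assert torch.equal(torch.cat([q, k, v]), in_proj_weight)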
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # reconstructed: strip the "conditional_detr" prefix and re-root the key
                # under "conditional_detr.model"
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_UpperCAmelCase = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
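# Hedged usage note (not part of the original script): a typical run, assuming network
# access for torch.hub and the Hugging Face Hub. The script file name is illustrative.
#
#     python convert_conditional_detr_checkpoint.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional-detr-resnet-50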
| 699 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word, where the word is a
    # tuple of variable-length string symbols.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")

            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        # Converts a token (str) into an index (integer) using the vocab.
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        # Converts an index (integer) into a token (str) using the vocab.
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # combine tokens
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
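# Hedged usage sketch (not part of the original module): decoding needs only a vocab
# file, so a throwaway vocabulary is enough to see the "@@ " continuation marker being
# collapsed. Assumes `transformers` is installed; the vocab content is illustrative.
#
#     import json, os, tempfile
#     from transformers import Speech2Text2Tokenizer
#
#     with tempfile.TemporaryDirectory() as tmp:
#         vocab_path = os.path.join(tmp, "vocab.json")
#         with open(vocab_path, "w", encoding="utf-8") as f:
#             json.dump({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5}, f)
#         tokenizer = Speech2Text2Tokenizer(vocab_path)
#         print(tokenizer.convert_tokens_to_string(["hel@@", "lo", "world"]))  # "hello world"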
| 699 | 1 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    # Return the "Cited by" entry for the paper described by `params` on Google Scholar.
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2_018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
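# Hedged usage note (not part of the original script): Google Scholar rate-limits
# automated requests and changes its markup over time, so the CSS classes used above
# ("gs_ri", "gs_fl") can break without notice. Sending a browser-like User-Agent
# header (illustrative value below) sometimes avoids immediate blocking:
#
#     html = requests.get(
#         "https://scholar.google.com/scholar_lookup",
#         params=params,
#         headers={"User-Agent": "Mozilla/5.0"},
#     )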
| 699 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
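# Hedged usage sketch (not part of the original module): calling the tool triggers a
# download of the bart-large-mnli checkpoint on first use, so network access is
# required. The labels below are illustrative.
#
#     from transformers.tools import TextClassificationTool
#
#     classifier = TextClassificationTool()
#     print(classifier("This is a super nice API!", labels=["positive", "negative"]))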
| 699 | 1 |
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # Return the (row, column) pair, 1-indexed, that represents the given
        # letter in the polybius square.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        # Return the letter at position [index1, index2] of the polybius square.
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        # Encipher: write coordinates as two rows, then read them off row by row
        # in pairs (the bifid transposition).
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        # Decipher: lay the coordinate stream back out as one long row, fold it
        # into two rows, and read columns to recover the plaintext.
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
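if __name__ == "__main__":
    # Quick self-check (added for illustration): the decode step inverts the encode
    # step, so a round trip reproduces the normalized plaintext.
    cipher = BifidCipher()
    encoded = cipher.encode("test message")
    print(encoded)
    print(cipher.decode(encoded))  # "testmessage" (spaces removed, j -> i)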
| 699 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # Rename PyTorch weight names to the corresponding Flax names and reshape
    # the tensor where necessary.

    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
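# Hedged illustration (not part of the original module): `rename_key` only rewrites the
# "name.<index>" segments PyTorch uses for module lists into the "name_<index>" form
# used by the randomly initialized Flax params, e.g.:
#
#     rename_key("down_blocks.0.attentions.1.proj.weight")
#     # -> "down_blocks_0.attentions_1.proj.weight"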
| 699 | 1 |
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
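    # Worked example (added for illustration): with conductivity = 25 S/m and an
    # electron concentration of 100 / m^3, the remaining unknown is the mobility,
    # 25 / (100 * ELECTRON_CHARGE).
    print(electric_conductivity(conductivity=25, electron_conc=100, mobility=0))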
| 699 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        # precondition
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of size 'dimension'
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index 'pos' (0-indexed)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a random vector of size n with integer components between 'a' and 'b'
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    # returns a square zero matrix of dimension n x n
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    # returns a random matrix of size width x height with integer components
    # between 'a' and 'b'
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
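if __name__ == "__main__":
    # Small smoke test (added for illustration); the expected values follow directly
    # from the definitions above: (1,2,3).(1,1,1) = 6 and det([[1,2],[3,4]]) = -2.
    v = Vector([1, 2, 3])
    w = Vector([1, 1, 1])
    print(v * w)  # 6
    print(axpy(2, v, w))  # (3,5,7)
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # -2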
| 699 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
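# ONNX input spec: log-mel 'input_features' with (batch, feature_size, encoder_sequence)
# dynamic axes; the decoder-side axes below depend on whether past key values are used.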
A = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
return common_inputs
def UpperCamelCase ( self : str , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 22050 , UpperCamelCase__ : float = 5.0 , UpperCamelCase__ : int = 220 , ):
A = OrderedDict()
A = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=UpperCamelCase__ , framework=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , time_duration=UpperCamelCase__ , frequency=UpperCamelCase__ , )
A = encoder_inputs['input_features'].shape[2]
A = encoder_sequence_length // 2 if self.use_past else seq_length
A = super().generate_dummy_inputs(
preprocessor.tokenizer , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = encoder_inputs.pop('input_features' )
A = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
A = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCamelCase ( self : Dict ):
return 1e-3
| 699 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any]=50265 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Optional[int]=2048 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : List[str] , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If both encoder and decoder layer counts are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs matching the computed batch and sequence sizes
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
| 699 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
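# Lazy import structure: the torch-backed model classes below are only registered
# when torch is actually available; otherwise they are silently skipped.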
_UpperCAmelCase = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 699 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
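# Illustrative usage sketch (the checkpoint id is an assumption, not taken from this file):
#
#   from transformers import BridgeTowerProcessor
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   batch = processor(images=image, text="a photo of a cat", return_tensors="pt")
#   # 'batch' holds the tokenizer outputs plus 'pixel_values' (and 'pixel_mask')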
| 699 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''CLIPImageProcessor'''
SCREAMING_SNAKE_CASE : Optional[int] = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self : Tuple , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Dict ):
A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase__ , )
A = kwargs.pop('feature_extractor' )
A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] ):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
A = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
A = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def UpperCamelCase ( self : List[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : List[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Dict ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 699 |
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> str:
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 699 | 1 |
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ['''flax''', '''transformers''']
def __init__( self : Optional[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def UpperCamelCase ( cls : Any , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def UpperCamelCase ( cls : Dict , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ):
requires_backends(cls , ['flax', 'transformers'] )
class _UpperCAmelCase ( metaclass=__lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = ['''flax''', '''transformers''']
def __init__( self : Optional[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def UpperCamelCase ( cls : List[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
requires_backends(cls , ['flax', 'transformers'] )
class _UpperCAmelCase ( metaclass=__lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ['''flax''', '''transformers''']
def __init__( self : int , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def UpperCamelCase ( cls : int , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : int ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def UpperCamelCase ( cls : Optional[int] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Any ):
requires_backends(cls , ['flax', 'transformers'] )
class _UpperCAmelCase ( metaclass=__lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''flax''', '''transformers''']
def __init__( self : int , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Dict ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def UpperCamelCase ( cls : Union[str, Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Dict ):
requires_backends(cls , ['flax', 'transformers'] )
| 699 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCAmelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 2_000.0 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2048 , UpperCamelCase__ : float = 0.1 , ):
super().__init__()
A = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = False
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : int ):
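# Build the encoder-decoder attention mask as the outer product of the decoder (query)
# and encoder (key) padding masks, then add a broadcastable head dimension.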
A = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
A , A , A = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A = self.conditioning_emb(UpperCamelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A = torch.broadcast_to(
torch.arange(UpperCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A = self.position_encoding(UpperCamelCase__ )
A = self.continuous_inputs_projection(UpperCamelCase__ )
inputs += position_encodings
A = self.dropout(UpperCamelCase__ )
# decoder: No padding present.
A = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A = [(x, self.encoder_decoder_mask(UpperCamelCase__ , UpperCamelCase__ )) for x, y in encodings_and_masks]
# cross-attention style: concatenate the encodings along the sequence axis
A = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A = lyr(
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )[0]
A = self.decoder_norm(UpperCamelCase__ )
A = self.post_dropout(UpperCamelCase__ )
A = self.spec_out(UpperCamelCase__ )
return spec_out
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=1e-6 ):
super().__init__()
A = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=None , ):
A = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
A = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
A = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
super().__init__()
A = TaLayerNorm(UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , ):
# pre_self_attention_layer_norm
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A = self.attention(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , ):
A = self.layer_norm(UpperCamelCase__ )
A = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
super().__init__()
A = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=None ):
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.film(UpperCamelCase__ , UpperCamelCase__ )
A = self.DenseReluDense(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
A = NewGELUActivation()
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
# gated activation: the GELU gate and the linear branch must use two different projections
A = self.act(self.wi_0(UpperCamelCase__ ) )
A = self.wi_1(UpperCamelCase__ )
A = hidden_gelu * hidden_linear
A = self.dropout(UpperCamelCase__ )
A = self.wo(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=1e-6 ):
super().__init__()
A = nn.Parameter(torch.ones(UpperCamelCase__ ) )
A = eps
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : int ):
# T5 uses a layer norm that only rescales and does not shift, also known as Root Mean
# Square Layer Normalization (https://arxiv.org/abs/1910.07467): the variance is computed
# without subtracting the mean and there is no bias. Additionally, the accumulation for
# half-precision inputs is deliberately done in fp32.
A = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase__ )
A = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
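# A minimal standalone sketch of the RMS normalization above (illustrative only):
#
#   import torch
#   def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
#       variance = x.float().pow(2).mean(-1, keepdim=True)  # no mean subtraction
#       return weight * (x * torch.rsqrt(variance + eps)).to(x.dtype)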
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def UpperCamelCase ( self : Any , UpperCamelCase__ : torch.Tensor ):
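# tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))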
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase__ , 3.0 )) ))
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
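# FiLM layer: feature-wise linear modulation (https://arxiv.org/abs/1709.07871), predicting
# a per-feature (scale, shift) pair from the conditioning embedding.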
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , out_features * 2 , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
A = self.scale_bias(UpperCamelCase__ )
A , A = torch.chunk(UpperCamelCase__ , 2 , -1 )
A = x * (1 + scale) + shift
return x
| 699 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
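# Three offline simulation modes are exercised below: requests that time out,
# requests that fail outright, and the HF_DATASETS_OFFLINE=1 environment flag.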
@pytest.mark.integration
def __UpperCamelCase () -> Dict:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(lowerCAmelCase ):
requests.request('GET', 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET', 'https://huggingface.co', timeout=1.0 )
@pytest.mark.integration
def __UpperCamelCase () -> str:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET', 'https://huggingface.co' )
def __UpperCamelCase () -> Any:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowerCAmelCase ):
http_head('https://huggingface.co' )
| 699 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
A = WATERMARK_BITS
A = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor ):
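# Map [-1, 1] images to the 0-255 range, embed the bit pattern into each image
# with the DWT-DCT method, then map the results back to [-1, 1].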
# the watermark encoder can't handle images whose width is below 256 pixels; pass them through unchanged
if images.shape[-1] < 256:
return images
A = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A = [self.encoder.encode(UpperCamelCase__ , 'dwtDct' ) for image in images]
A = torch.from_numpy(np.array(UpperCamelCase__ ) ).permute(0 , 3 , 1 , 2 )
A = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
| 699 | 1 |
import math
import os
import sys
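# A simple LZW-style compressor over bit strings: the file is read as one long bit
# string, growing substrings are matched against a lexicon, and matches are replaced
# by binary code ids whose width grows with the lexicon.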
def __UpperCamelCase (lowerCAmelCase : str ) -> str:
A = ''
try:
with open(lowerCAmelCase, 'rb' ) as binary_file:
A = binary_file.read()
for dat in data:
A = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def __UpperCamelCase (lowerCAmelCase : dict[str, str], lowerCAmelCase : str, lowerCAmelCase : int, lowerCAmelCase : str ) -> None:
lexicon.pop(lowerCAmelCase )
A = last_match_id
if math.loga(lowerCAmelCase ).is_integer():
for curr_key in lexicon:
A = '0' + lexicon[curr_key]
A = bin(lowerCAmelCase )[2:]
def __UpperCamelCase (lowerCAmelCase : str ) -> str:
A = {'0': '0', '1': '1'}
A , A = '', ''
A = len(lowerCAmelCase )
for i in range(len(lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
index += 1
A = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
A = lexicon[curr_string]
result += last_match_id
return result
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str ) -> str:
A = os.path.getsize(lowerCAmelCase )
A = bin(lowerCAmelCase )[2:]
A = len(lowerCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str ) -> None:
A = 8
try:
with open(lowerCAmelCase, 'wb' ) as opened_file:
A = [
to_write[i : i + byte_length]
for i in range(0, len(lowerCAmelCase ), lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(lowerCAmelCase, 2 ).to_bytes(1, byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str ) -> None:
A = read_file_binary(lowerCAmelCase )
A = compress_data(lowerCAmelCase )
A = add_file_length(lowerCAmelCase, lowerCAmelCase )
write_file_binary(lowerCAmelCase, lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 699 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
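# Mapping from fairseq parameter name fragments (keys) to the corresponding
# HF UniSpeechSat module paths (values); '*' is replaced by the layer index.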
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : str ) -> int:
for attribute in key.split('.' ):
A = getattr(lowerCAmelCase, lowerCAmelCase )
if weight_type is not None:
A = getattr(lowerCAmelCase, lowerCAmelCase ).shape
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Dict:
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase )[0].split('.' )[-2]
A = mapped_key.replace('*', lowerCAmelCase )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : List[Any], lowerCAmelCase : int ) -> Dict:
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any]=None, lowerCAmelCase : str=None, lowerCAmelCase : List[Any]=True ) -> Union[str, Any]:
if config_path is not None:
A = UniSpeechSatConfig.from_pretrained(lowerCAmelCase )
else:
A = UniSpeechSatConfig()
A = ''
if is_finetuned:
A = UniSpeechSatForCTC(lowerCAmelCase )
else:
A = UniSpeechSatForPreTraining(lowerCAmelCase )
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
A = model[0].eval()
recursively_load_weights(lowerCAmelCase, lowerCAmelCase )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 699 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def __UpperCamelCase (lowerCAmelCase : Tuple=None ) -> int:
if subparsers is not None:
A = subparsers.add_parser('env' )
else:
A = argparse.ArgumentParser('Accelerate env command' )
parser.add_argument(
'--config_file', default=lowerCAmelCase, help='The config file to use for the default values in the launching script.' )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def __UpperCamelCase (lowerCAmelCase : Union[str, Any] ) -> Tuple:
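# Collect platform, Python, NumPy, PyTorch and accelerator availability info plus the
# current Accelerate config, and print it in a copy-pasteable GitHub-issue format.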
A = torch.__version__
A = torch.cuda.is_available()
A = is_xpu_available()
A = is_npu_available()
A = 'Not found'
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(lowerCAmelCase ):
A = load_config_from_file(args.config_file ).to_dict()
A = {
'`Accelerate` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Numpy version': np.__version__,
'PyTorch version (GPU?)': f'''{pt_version} ({pt_cuda_available})''',
'PyTorch XPU available': str(lowerCAmelCase ),
'PyTorch NPU available': str(lowerCAmelCase ),
'System RAM': f'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''',
}
if pt_cuda_available:
A = torch.cuda.get_device_name()
print('\nCopy-and-paste the text below in your GitHub issue\n' )
print('\n'.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
A = (
'\n'.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase, lowerCAmelCase )
else f'''\t{accelerate_config}'''
)
print(lowerCAmelCase )
A = accelerate_config
return info
def __UpperCamelCase () -> int:
A = env_command_parser()
A = parser.parse_args()
env_command(lowerCAmelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 699 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : T ):
A = data
A = None
def __str__( self : Optional[int] ):
return f'''{self.data}'''
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
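# Singly linked LIFO stack: 'top' always points at the most recently pushed node.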
def __init__( self : Tuple ):
A = None
def __iter__( self : int ):
A = self.top
while node:
yield node.data
A = node.next
def __str__( self : Any ):
return "->".join([str(UpperCamelCase__ ) for item in self] )
def __len__( self : Dict ):
return len(tuple(iter(self ) ) )
def UpperCamelCase ( self : List[str] ):
return self.top is None
def UpperCamelCase ( self : Dict , UpperCamelCase__ : T ):
A = Node(UpperCamelCase__ )
if not self.is_empty():
A = self.top
A = node
def UpperCamelCase ( self : Dict ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , UpperCamelCase__ )
A = self.top
A = self.top.next
return pop_node.data
def UpperCamelCase ( self : List[str] ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def UpperCamelCase ( self : List[str] ):
A = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 699 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''biogpt'''
def __init__( self : Optional[Any] , UpperCamelCase__ : str=42384 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Dict=1e-1_2 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Optional[Any]=2 , **UpperCamelCase__ : List[Any] , ):
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = scale_embedding
A = use_cache
A = layerdrop
A = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 699 |
from __future__ import annotations
import math
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : bool, lowerCAmelCase : list[int], lowerCAmelCase : float ) -> int:
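# Classic minimax over a perfect binary tree of leaf scores: maximizing and minimizing
# levels alternate, and the children of node_index live at 2*i and 2*i + 1.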
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1, node_index * 2, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), )
if is_max
else min(
minimax(depth + 1, node_index * 2, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), )
)
def __UpperCamelCase () -> None:
A = [90, 23, 6, 33, 21, 65, 123, 34_423]
A = math.log(len(lowerCAmelCase ), 2 )
print(f'''Optimal value : {minimax(0, 0, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 699 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self : Tuple ):
A = 1
A = 3
A = (32, 32)
A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase__ )
return image
@property
def UpperCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self : Optional[Any] ):
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCamelCase__ )
@property
def UpperCamelCase ( self : int ):
def extract(*UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str] ):
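# Dummy feature extractor: returns an object exposing an empty 'pixel_values'
# tensor so the pipeline's safety-checker plumbing can run without real images.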
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ):
A = torch.ones([0] )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
self.pixel_values.to(UpperCamelCase__ )
return self
return Out()
return extract
def UpperCamelCase ( self : Optional[int] ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.dummy_cond_unet
A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
A = self.dummy_vae
A = self.dummy_text_encoder
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# assemble the pipeline from the dummy components (note: a DDIM scheduler is used in this test)
A = StableDiffusionPipeline(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=self.dummy_extractor , )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = 'A painting of a squirrel eating a burger'
A = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A = sd_pipe([prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
A = output.images
A = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A = sd_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCamelCase__ , )[0]
A = image[0, -3:, -3:, -1]
A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self : List[Any] ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.dummy_cond_unet
A = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
A = self.dummy_vae
A = self.dummy_text_encoder
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure the PNDM scheduler skips the PRK steps
A = StableDiffusionPipeline(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=self.dummy_extractor , )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = 'A painting of a squirrel eating a burger'
A = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A = sd_pipe([prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
A = output.images
A = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A = sd_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCamelCase__ , )[0]
A = image[0, -3:, -3:, -1]
A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self : int ):
A = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert isinstance(pipe.scheduler , UpperCamelCase__ )
assert pipe.safety_checker is None
A = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
A = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
A = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase ( self : Optional[int] ):
A = self.dummy_cond_unet
A = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
A = self.dummy_vae
A = self.dummy_text_encoder
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
A = unet.half()
A = vae.half()
A = bert.half()
# make sure the PNDM scheduler skips the PRK steps
A = StableDiffusionPipeline(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=self.dummy_extractor , )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = 'A painting of a squirrel eating a burger'
A = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int ):
A = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCamelCase__ )
A = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
A = 4003660346
A = 7
# without safety guidance (sld_guidance_scale = 0)
A = torch.manual_seed(UpperCamelCase__ )
A = sd_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
A = output.images
A = image[0, -3:, -3:, -1]
A = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# with safety guidance (strong configuration)
A = torch.manual_seed(UpperCamelCase__ )
A = sd_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
A = output.images
A = image[0, -3:, -3:, -1]
A = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self : Optional[int] ):
A = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCamelCase__ )
A = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = 'padme amidala taking a bath artwork, safe for work, no nudity'
A = 2734971755
A = 7
A = torch.manual_seed(UpperCamelCase__ )
A = sd_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
A = output.images
A = image[0, -3:, -3:, -1]
A = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
A = torch.manual_seed(UpperCamelCase__ )
A = sd_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
A = output.images
A = image[0, -3:, -3:, -1]
A = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self : Optional[int] ):
A = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
A = 1044355234
A = 12
A = torch.manual_seed(UpperCamelCase__ )
A = sd_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
A = output.images
A = image[0, -3:, -3:, -1]
A = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
A = torch.manual_seed(UpperCamelCase__ )
A = sd_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
A = output.images
A = image[0, -3:, -3:, -1]
A = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
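# Added, hedged sketch (not from the original test file): the sld_* knobs
# exercised above implement Safe Latent Diffusion; in diffusers they are
# exposed by StableDiffusionPipelineSafe. sld_guidance_scale=0 disables the
# safety guidance, while large values steer samples away from unsafe concepts.
# Prompt and output path are placeholders; requires a CUDA GPU.
if __name__ == "__main__":
    import torch
    from diffusers import StableDiffusionPipelineSafe

    safe_pipe = StableDiffusionPipelineSafe.from_pretrained('runwayml/stable-diffusion-v1-5').to('cuda')
    out = safe_pipe(
        'portrait photo, detailed',
        generator=torch.manual_seed(0),
        num_inference_steps=50,
        sld_guidance_scale=2000,
        sld_warmup_steps=7,
        sld_threshold=0.025,
        sld_momentum_scale=0.5,
        sld_mom_beta=0.7,
    )
    out.images[0].save('safe_sample.png')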
| 699 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='max_length',
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
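# Hedged usage sketch for get_tfds (added, not part of the original script).
# The CSV paths are placeholders; it assumes the label lives in column 0.
# Wrapped in a function so nothing runs at import time.
def _demo_get_tfds():
    tok = AutoTokenizer.from_pretrained('bert-base-uncased')
    train_ds, val_ds, test_ds, label2id = get_tfds(
        train_file='train.csv', eval_file='dev.csv', test_file='test.csv',
        tokenizer=tok, label_column_id=0, max_seq_length=128,
    )
    print(label2id)
    print(train_ds.element_spec)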
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: str = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
        f'''16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key, value in result.items():
                logger.info(f'  {key} = {value}')
                writer.write(f'{key} = {value}\n')
            results.update(result)
return results
if __name__ == "__main__":
main()
| 699 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
'''simple docstring'''
    def __init__( self , dim: int , num_attention_heads: int , attention_head_dim: int , dropout: float = 0.0 , cross_attention_dim: Optional[int] = None , activation_fn: str = 'geglu' , num_embeds_ada_norm: Optional[int] = None , attention_bias: bool = False , only_cross_attention: bool = False , double_self_attention: bool = False , upcast_attention: bool = False , norm_elementwise_affine: bool = True , norm_type: str = 'layer_norm' , final_dropout: bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = int(dim * mult )
A = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A = GELU(UpperCamelCase__ , UpperCamelCase__ )
if activation_fn == "gelu-approximate":
A = GELU(UpperCamelCase__ , UpperCamelCase__ , approximate='tanh' )
elif activation_fn == "geglu":
A = GEGLU(UpperCamelCase__ , UpperCamelCase__ )
elif activation_fn == "geglu-approximate":
A = ApproximateGELU(UpperCamelCase__ , UpperCamelCase__ )
A = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase__ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase__ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase__ ) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int ):
for module in self.net:
A = module(UpperCamelCase__ )
return hidden_states
class GELU(nn.Module):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str = "none" ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
A = approximate
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int ):
A = self.proj(UpperCamelCase__ )
A = self.gelu(UpperCamelCase__ )
return hidden_states
class GEGLU(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , dim_out * 2 )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Tuple ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : str , UpperCamelCase__ : str ):
A , A = self.proj(UpperCamelCase__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase__ )
class ApproximateGELU(nn.Module):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
A = self.proj(UpperCamelCase__ )
return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
super().__init__()
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , embedding_dim * 2 )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ ) ) )
A , A = torch.chunk(UpperCamelCase__ , 2 )
A = self.norm(UpperCamelCase__ ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
super().__init__()
A = CombinedTimestepLabelEmbeddings(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , 6 * embedding_dim , bias=UpperCamelCase__ )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ , eps=1e-6 )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=UpperCamelCase__ ) ) )
A , A , A , A , A , A = emb.chunk(6 , dim=1 )
A = self.norm(UpperCamelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : float = 1e-5 ):
super().__init__()
A = num_groups
A = eps
if act_fn is None:
A = None
else:
A = get_activation(UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , out_dim * 2 )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str ):
if self.act:
A = self.act(UpperCamelCase__ )
A = self.linear(UpperCamelCase__ )
A = emb[:, :, None, None]
A , A = emb.chunk(2 , dim=1 )
A = F.group_norm(UpperCamelCase__ , self.num_groups , eps=self.eps )
A = x * (1 + scale) + shift
return x
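# Standalone sketch (added) of the GEGLU gating used by the feed-forward block
# above: one linear projection produces both hidden states and a gate, and the
# GELU-activated gate multiplies the states elementwise.
if __name__ == "__main__":
    class _GEGLUDemo(nn.Module):
        def __init__(self, dim_in: int, dim_out: int):
            super().__init__()
            self.proj = nn.Linear(dim_in, dim_out * 2)  # one matmul, split into two halves

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            hidden, gate = self.proj(x).chunk(2, dim=-1)
            return hidden * F.gelu(gate)

    print(_GEGLUDemo(64, 128)(torch.randn(2, 16, 64)).shape)  # torch.Size([2, 16, 128])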
| 699 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
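# Added demo (inert in an __init__.py, only runs when executed directly): the
# lazy module above keeps `import` cheap and defers heavy backend imports
# until an attribute is first accessed.
if __name__ == "__main__":
    import importlib

    xglm = importlib.import_module('transformers.models.xglm')
    print(type(xglm))       # _LazyModule
    print(xglm.XGLMConfig)  # first access triggers the real submodule import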
| 699 | 1 |
def actual_power(a: int, b: int) -> int:
    """Computes a**b for b >= 0 by recursive squaring."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Handles negative exponents by inverting the positive power."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
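# Quick sanity checks (added) for the fast-exponentiation helpers above.
assert actual_power(3, 5) == 243
assert power(2, 10) == 1024
assert power(5, 0) == 1
assert power(-2, -3) == -0.125  # 1 / (-2)**3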
| 699 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'.{module_name}', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.')
        return {}
    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoImageProcessor:
'''simple docstring'''
def __init__( self : Union[str, Any] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
A = kwargs.pop('config' , UpperCamelCase__ )
A = kwargs.pop('trust_remote_code' , UpperCamelCase__ )
A = True
A , A = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase__ , **UpperCamelCase__ )
A = config_dict.get('image_processor_type' , UpperCamelCase__ )
A = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
A = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
A = config_dict.pop('feature_extractor_type' , UpperCamelCase__ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
A = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
A = config_dict['auto_map']['AutoFeatureExtractor']
A = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A = AutoConfig.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
# It could be in `config.image_processor_type``
A = getattr(UpperCamelCase__ , 'image_processor_type' , UpperCamelCase__ )
if hasattr(UpperCamelCase__ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
A = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
A = image_processor_class_from_name(UpperCamelCase__ )
A = image_processor_auto_map is not None
A = image_processor_class is not None or type(UpperCamelCase__ ) in IMAGE_PROCESSOR_MAPPING
A = resolve_trust_remote_code(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if has_remote_code and trust_remote_code:
A = get_class_from_dynamic_module(
UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
A = kwargs.pop('code_revision' , UpperCamelCase__ )
if os.path.isdir(UpperCamelCase__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCamelCase__ ) in IMAGE_PROCESSOR_MAPPING:
A = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase__ )]
return image_processor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def UpperCamelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] ):
IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase__ , UpperCamelCase__ )
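# Hedged usage sketch (added; the checkpoint name is an example): resolve an
# image processor from a hub checkpoint and preprocess a single image.
if __name__ == "__main__":
    from PIL import Image

    processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
    batch = processor(images=Image.new('RGB', (224, 224)), return_tensors='pt')
    print(batch['pixel_values'].shape)  # torch.Size([1, 3, 224, 224])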
| 699 |
def max_product_subarray(numbers: list[int]) -> int:
    """Returns the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # a negative number swaps the roles of the running max and min products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
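# Quick checks (added) for max_product_subarray; the sign swap on negatives is
# the crux, so include a case where the best product spans two negatives.
assert max_product_subarray([2, 3, -2, 4]) == 6
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-4, -3, -2]) == 12  # (-4) * (-3)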
| 699 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask into the additive form expected by T5Block
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
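# Hedged smoke test (added): hyperparameters are made up and deliberately tiny
# so this runs on CPU (only meaningful inside the diffusers package, given the
# relative imports above).
if __name__ == "__main__":
    enc = SpectrogramNotesEncoder(
        max_length=32, vocab_size=100, d_model=64, dropout_rate=0.1,
        num_layers=2, num_heads=2, d_kv=32, d_ff=128, feed_forward_proj='gated-gelu',
    )
    tokens = torch.randint(0, 100, (1, 32))
    mask = torch.ones(1, 32, dtype=torch.long)
    hidden, out_mask = enc(tokens, mask)
    print(hidden.shape)  # torch.Size([1, 32, 64])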
| 699 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
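# Hedged usage sketch (added; the data path is a placeholder): load SQuAD v1
# dev examples with the processor re-exported above and convert them to model
# features.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    processor = SquadV1Processor()
    examples = processor.get_dev_examples('path/to/squad')  # expects dev-v1.1.json
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    features = squad_convert_examples_to_features(
        examples=examples, tokenizer=tokenizer, max_seq_length=384,
        doc_stride=128, max_query_length=64, is_training=False,
    )
    print(len(features))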
| 699 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
'''simple docstring'''
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function='gelu' , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
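# --- Illustrative sketch (added; not part of the original config class) ---
# Each cached key/value tensor built above has the shape
# (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads).
# The concrete numbers below are hypothetical example values.
import torch

_batch, _heads, _past_len, _hidden = 2, 16, 10, 1024
_dummy_kv = torch.zeros(_batch, _heads, _past_len, _hidden // _heads)
assert _dummy_kv.shape == (2, 16, 10, 64)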
| 699 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''biogpt'''
def __init__( self : Optional[Any] , UpperCamelCase__ : str=42384 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Dict=1e-1_2 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Optional[Any]=2 , **UpperCamelCase__ : List[Any] , ):
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = scale_embedding
A = use_cache
A = layerdrop
A = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
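# --- Hedged usage sketch (added) ---
# Assuming a transformers release that ships BioGPT, the class above is
# published as `transformers.BioGptConfig`; its defaults match the
# __init__ signature shown here.
from transformers import BioGptConfig

_cfg = BioGptConfig()
assert _cfg.vocab_size == 42384 and _cfg.num_hidden_layers == 24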
| 699 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
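# --- Minimal stdlib illustration of the lazy-import pattern above (added) ---
# The heavy submodule is imported only when one of its names is first used,
# which is what `_LazyModule` automates for the import structure.
import importlib

def _lazy_get(module_name, attr):
    return getattr(importlib.import_module(module_name), attr)

assert _lazy_get("json", "dumps")({"a": 1}) == '{"a": 1}'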
| 699 |
import sys
def __UpperCamelCase (lowerCAmelCase : Dict ) -> Dict:
A = len(lowerCAmelCase )
A = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
A = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
for chain_length in range(2, lowerCAmelCase ):
for a in range(1, n - chain_length + 1 ):
A = a + chain_length - 1
A = sys.maxsize
for c in range(lowerCAmelCase, lowerCAmelCase ):
A = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
A = cost
A = c
return matrix, sol
def __UpperCamelCase (lowerCAmelCase : Optional[Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Union[str, Any] ) -> List[str]:
if i == j:
print('A' + str(lowerCAmelCase ), end=' ' )
else:
print('(', end=' ' )
        print_optimal_solution(lowerCAmelCase, lowerCAmelCase, optimal_solution[i][j] )
        print_optimal_solution(lowerCAmelCase, optimal_solution[i][j] + 1, lowerCAmelCase )
print(')', end=' ' )
def __UpperCamelCase () -> List[str]:
A = [30, 35, 15, 5, 10, 20, 25]
A = len(lowerCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
A , A = matrix_chain_order(lowerCAmelCase )
    print('No. of Operations required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(lowerCAmelCase, 1, n - 1 )
if __name__ == "__main__":
main()
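# --- Hedged sanity check (added): a compact, self-contained version of the
# same DP, confirming the classic CLRS optimum of 15125 scalar
# multiplications for dimensions [30, 35, 15, 5, 10, 20, 25].
def _mco_min_cost(dims):
    n = len(dims)
    cost = [[0] * n for _ in range(n)]
    for length in range(2, n):
        for i in range(1, n - length + 1):
            j = i + length - 1
            cost[i][j] = min(
                cost[i][k] + cost[k + 1][j] + dims[i - 1] * dims[k] * dims[j]
                for k in range(i, j)
            )
    return cost[1][n - 1]

assert _mco_min_cost([30, 35, 15, 5, 10, 20, 25]) == 15125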
| 699 | 1 |
from collections import namedtuple
_UpperCAmelCase = namedtuple("from_to", "from_ to")
_UpperCAmelCase = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.0_0_1, 1_000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
"cubicyard": from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
"cubicfoot": from_to(0.0_2_8, 3_5.3_1_4_7),
"cup": from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def __UpperCamelCase (lowerCAmelCase : float, lowerCAmelCase : str, lowerCAmelCase : str ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ ', '.join(lowerCAmelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ', '.join(lowerCAmelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
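# --- Hedged standalone check (added) of the conversion rule used above:
# a value is first scaled into cubic metres (`from_`), then into the target
# unit (`to`), i.e. result = value * FROM.from_ * TO.to.
_from_cubicmeter = {"cubicmeter": 1, "litre": 0.001}
_to_per_cubicmeter = {"cubicmeter": 1, "litre": 1000}
assert 4 * _from_cubicmeter["cubicmeter"] * _to_per_cubicmeter["litre"] == 4000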
| 699 |
from math import isqrt
def __UpperCamelCase (lowerCAmelCase : int ) -> bool:
return all(number % divisor != 0 for divisor in range(2, isqrt(lowerCAmelCase ) + 1 ) )
def __UpperCamelCase (lowerCAmelCase : int = 10**6 ) -> int:
A = 0
A = 1
A = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowerCAmelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
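# --- Why `prime_candidate += 6 * cube_index` works (added illustration) ---
# Consecutive cube differences satisfy (k+1)**3 - k**3 = 3*k*k + 3*k + 1,
# and the gap between successive differences is exactly 6*(k+1).
for _k in range(1, 6):
    _d1 = (_k + 1) ** 3 - _k**3
    _d2 = (_k + 2) ** 3 - (_k + 1) ** 3
    assert _d1 == 3 * _k * _k + 3 * _k + 1
    assert _d2 - _d1 == 6 * (_k + 1)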
| 699 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : str ):
super().__init__()
A = model
A = 2
A = nn.Linear(self.model.config.hidden_size , self.num_labels )
def UpperCamelCase ( self : int ):
pass
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : str ) -> List[str]:
# load longformer model from model identifier
A = LongformerModel.from_pretrained(lowerCAmelCase )
A = LightningModel(lowerCAmelCase )
A = torch.load(lowerCAmelCase, map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
A = LongformerForQuestionAnswering.from_pretrained(lowerCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowerCAmelCase )
print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_UpperCAmelCase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 699 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
def __init__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple ):
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 699 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = PandasConfig
def UpperCamelCase ( self : Any ):
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase ( self : int , UpperCamelCase__ : int ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
A = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
A = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
A = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={'files': files} ) )
return splits
def UpperCamelCase ( self : int , UpperCamelCase__ : pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A = table_cast(UpperCamelCase__ , self.config.features.arrow_schema )
return pa_table
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Any ):
for i, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , 'rb' ) as f:
A = pa.Table.from_pandas(pd.read_pickle(UpperCamelCase__ ) )
yield i, self._cast_table(UpperCamelCase__ )
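# --- Hedged illustration (added) of the core conversion used in the
# generator above: a pandas DataFrame round-trips into a pyarrow Table.
import pandas as _pd
import pyarrow as _pa

_tbl = _pa.Table.from_pandas(_pd.DataFrame({"x": [1, 2], "y": ["a", "b"]}))
assert _tbl.num_rows == 2 and set(["x", "y"]) <= set(_tbl.column_names)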
| 699 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = int(dim * mult )
A = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A = GELU(UpperCamelCase__ , UpperCamelCase__ )
if activation_fn == "gelu-approximate":
A = GELU(UpperCamelCase__ , UpperCamelCase__ , approximate='tanh' )
elif activation_fn == "geglu":
A = GEGLU(UpperCamelCase__ , UpperCamelCase__ )
elif activation_fn == "geglu-approximate":
A = ApproximateGELU(UpperCamelCase__ , UpperCamelCase__ )
A = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase__ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase__ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase__ ) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int ):
for module in self.net:
A = module(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str = "none" ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
A = approximate
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int ):
A = self.proj(UpperCamelCase__ )
A = self.gelu(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , dim_out * 2 )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Tuple ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : str , UpperCamelCase__ : str ):
A , A = self.proj(UpperCamelCase__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase__ )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
A = self.proj(UpperCamelCase__ )
return x * torch.sigmoid(1.702 * x )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
super().__init__()
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , embedding_dim * 2 )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ ) ) )
A , A = torch.chunk(UpperCamelCase__ , 2 )
A = self.norm(UpperCamelCase__ ) * (1 + scale) + shift
return x
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
super().__init__()
A = CombinedTimestepLabelEmbeddings(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , 6 * embedding_dim , bias=UpperCamelCase__ )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ , eps=1e-6 )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=UpperCamelCase__ ) ) )
A , A , A , A , A , A = emb.chunk(6 , dim=1 )
A = self.norm(UpperCamelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : float = 1e-5 ):
super().__init__()
A = num_groups
A = eps
if act_fn is None:
A = None
else:
A = get_activation(UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , out_dim * 2 )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str ):
if self.act:
A = self.act(UpperCamelCase__ )
A = self.linear(UpperCamelCase__ )
A = emb[:, :, None, None]
A , A = emb.chunk(2 , dim=1 )
A = F.group_norm(UpperCamelCase__ , self.num_groups , eps=self.eps )
A = x * (1 + scale) + shift
return x
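# --- Hedged sketch (added) of the chunked feed-forward trick used in the
# transformer block above: chunking the sequence dimension lowers peak
# memory and, for a pointwise MLP, reproduces the unchunked result.
import torch as _torch

_x = _torch.randn(2, 8, 4)
_ff = _torch.nn.Linear(4, 4)
_full = _ff(_x)
_chunked = _torch.cat([_ff(c) for c in _x.chunk(4, dim=1)], dim=1)
assert _torch.allclose(_full, _chunked, atol=1e-6)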
| 699 | 1 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __UpperCamelCase (lowerCAmelCase : Dict, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : str=5 ) -> List[Any]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('<mask>' ) == 1
A = torch.tensor(tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase ) ).unsqueeze(0 ) # Batch size 1
A = model(lowerCAmelCase )[0] # The last hidden-state is the first element of the output tuple
A = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
A = logits[0, masked_index, :]
A = logits.softmax(dim=0 )
A , A = prob.topk(k=lowerCAmelCase, dim=0 )
A = ' '.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowerCAmelCase ) )] )
A = tokenizer.mask_token
A = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
A = predicted_token_bpe.replace('\u2581', ' ' )
if " {0}".format(lowerCAmelCase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(' {0}'.format(lowerCAmelCase ), lowerCAmelCase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowerCAmelCase, lowerCAmelCase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
_UpperCAmelCase = CamembertTokenizer.from_pretrained("camembert-base")
_UpperCAmelCase = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
_UpperCAmelCase = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 699 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def __UpperCamelCase (lowerCAmelCase : Optional[int] ) -> List[str]:
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
return pairs
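# --- Hedged standalone illustration (added) of the bigram extraction above:
# BPE ranks merge candidates over adjacent symbol pairs of a word.
_word = "hello"
_pairs = {(_word[i], _word[i + 1]) for i in range(len(_word) - 1)}
assert _pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}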
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : Optional[int] , ):
super().__init__(
unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , **UpperCamelCase__ , )
A = do_lower_case
with open(UpperCamelCase__ , encoding='utf-8' ) as vocab_handle:
A = json.load(UpperCamelCase__ )
A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
A = None
A = None
else:
with open(UpperCamelCase__ , encoding='utf-8' ) as merges_handle:
A = merges_handle.read().split('\n' )[:-1]
A = [tuple(merge.split()[:2] ) for merge in merges]
A = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A = {}
@property
def UpperCamelCase ( self : Union[str, Any] ):
return len(self.decoder )
def UpperCamelCase ( self : Optional[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] ):
A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
A = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
A = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(UpperCamelCase__ ):
try:
A = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(UpperCamelCase__ )
A = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
A = get_pairs(UpperCamelCase__ )
A = ' '.join(UpperCamelCase__ )
if word == "\n " + BPE_TOKEN_MERGES:
A = '\n' + BPE_TOKEN_MERGES
if word.endswith(UpperCamelCase__ ):
A = word.replace(UpperCamelCase__ , '' )
A = word.replace(' ' , UpperCamelCase__ )
A = word
return word
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Dict ):
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
                ' Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
A = text.lower()
A = text.split()
A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(UpperCamelCase__ ).split(' ' ) ) )
return split_tokens
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : str ):
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase ( self : str , UpperCamelCase__ : int ):
A = self.decoder.get(UpperCamelCase__ , self.unk_token )
return result
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : List[str] ):
A = ' '.join(UpperCamelCase__ )
# make sure @@ tokens are concatenated
A = ''.join(string.split(UpperCamelCase__ ) )
return string
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '\n' )
A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
A = token_index
writer.write(' '.join(UpperCamelCase__ ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 699 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 699 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''facebook/bart-large-mnli'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
SCREAMING_SNAKE_CASE : Any = '''text_classifier'''
SCREAMING_SNAKE_CASE : Any = AutoTokenizer
SCREAMING_SNAKE_CASE : Dict = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : List[Any] = ['''text''', ['''text''']]
SCREAMING_SNAKE_CASE : Dict = ['''text''']
def UpperCamelCase ( self : List[str] ):
super().setup()
A = self.model.config
A = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
A = int(UpperCamelCase__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
A = labels
return self.pre_processor(
[text] * len(UpperCamelCase__ ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : int , UpperCamelCase__ : List[str] ):
A = outputs.logits
A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
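# --- Hedged usage sketch (added): the same zero-shot NLI idea through the
# stable pipelines API (this downloads facebook/bart-large-mnli, hence the
# guard; the exact tool wiring above may differ across transformers versions).
if __name__ == "__main__":
    from transformers import pipeline

    _clf = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    print(_clf("This movie was a joy to watch.", candidate_labels=["positive", "negative"]))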
| 699 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase (lowerCAmelCase : Any, lowerCAmelCase : List[str]=False ) -> Optional[int]:
A = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : List[Any], lowerCAmelCase : List[str]=False ) -> Tuple:
for i in range(config.num_hidden_layers ):
if base_model:
A = ''
else:
A = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[
: config.hidden_size, :
]
A = in_proj_bias[: config.hidden_size]
A = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A = in_proj_weight[
-config.hidden_size :, :
]
A = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase (lowerCAmelCase : Dict ) -> List[str]:
A = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Dict ) -> Tuple:
A = dct.pop(lowerCAmelCase )
A = val
def __UpperCamelCase () -> int:
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(lowerCAmelCase, stream=lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Optional[Any] ) -> Optional[int]:
A = ViTConfig()
A = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A = True
A = int(vit_name[-12:-10] )
A = int(vit_name[-9:-6] )
else:
A = 1_000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(lowerCAmelCase, lowerCAmelCase, repo_type='dataset' ), 'r' ) )
A = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = int(vit_name[-6:-4] )
A = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
A = 192
A = 768
A = 12
A = 3
elif vit_name[9:].startswith('small' ):
A = 384
A = 1_536
A = 12
A = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
A = 768
A = 2_304
A = 8
A = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
A = 1_024
A = 4_096
A = 24
A = 16
elif vit_name[4:].startswith('huge' ):
A = 1_280
A = 5_120
A = 32
A = 16
# load original model from timm
A = timm.create_model(lowerCAmelCase, pretrained=lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase )
A = create_rename_keys(lowerCAmelCase, lowerCAmelCase )
for src, dest in rename_keys:
rename_key(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
read_in_q_k_v(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A = ViTModel(lowerCAmelCase ).eval()
else:
A = ViTForImageClassification(lowerCAmelCase ).eval()
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A = DeiTImageProcessor(size=config.image_size )
else:
A = ViTImageProcessor(size=config.image_size )
A = image_processor(images=prepare_img(), return_tensors='pt' )
A = encoding['pixel_values']
A = model(lowerCAmelCase )
if base_model:
A = timm_model.forward_features(lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCAmelCase, outputs.pooler_output, atol=1E-3 )
else:
A = timm_model(lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase, outputs.logits, atol=1E-3 )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 699 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase (lowerCAmelCase : List[str] ) -> Dict:
A = r'\w+[.]\d+'
A = re.findall(lowerCAmelCase, lowerCAmelCase )
for pat in pats:
A = key.replace(lowerCAmelCase, '_'.join(pat.split('.' ) ) )
return key
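# --- Hedged illustration (added) of the regex rewrite above: dotted numeric
# segments such as "layers.0" become underscore-joined, Flax-style names.
import re as _re

def _demo_rename(key):
    for pat in _re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

assert _demo_rename("down_blocks.0.attentions.1.proj") == "down_blocks_0.attentions_1.proj"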
def __UpperCamelCase (lowerCAmelCase : Optional[int], lowerCAmelCase : Dict, lowerCAmelCase : Dict ) -> Any:
A = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCamelCase (lowerCAmelCase : Tuple, lowerCAmelCase : Any, lowerCAmelCase : str=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
A = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A = flax_model.init_weights(PRNGKey(lowerCAmelCase ) )
A = flatten_dict(lowerCAmelCase )
A = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A = rename_key(lowerCAmelCase )
A = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A , A = rename_key_and_reshape_tensor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
A = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
| 699 | 1 |
import random
def __UpperCamelCase (lowerCAmelCase : list, lowerCAmelCase : Union[str, Any] ) -> tuple:
A , A , A = [], [], []
for element in data:
if element < pivot:
less.append(lowerCAmelCase )
elif element > pivot:
greater.append(lowerCAmelCase )
else:
equal.append(lowerCAmelCase )
return less, equal, greater
def __UpperCamelCase (lowerCAmelCase : list, lowerCAmelCase : int ) -> int:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(lowerCAmelCase ) or index < 0:
return None
A = items[random.randint(0, len(lowerCAmelCase ) - 1 )]
A = 0
A , A , A = _partition(lowerCAmelCase, lowerCAmelCase )
A = len(lowerCAmelCase )
A = len(lowerCAmelCase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(lowerCAmelCase, lowerCAmelCase )
# must be in larger
else:
return quick_select(lowerCAmelCase, index - (m + count) )
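# --- Hedged standalone check (added) of the quickselect idea above, written
# self-contained so it can actually run:
import random as _random

def _quick_select(items, index):
    pivot = items[_random.randrange(len(items))]
    less = [x for x in items if x < pivot]
    equal = [x for x in items if x == pivot]
    if index < len(less):
        return _quick_select(less, index)
    if index < len(less) + len(equal):
        return pivot
    greater = [x for x in items if x > pivot]
    return _quick_select(greater, index - len(less) - len(equal))

assert _quick_select([7, 1, 5, 3, 9], 2) == 5  # rank-2 (0-based) element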
| 699 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Collection[float] | None = None ):
if components is None:
A = []
A = list(UpperCamelCase__ )
def __len__( self : List[Any] ):
return len(self.__components )
def __str__( self : str ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self : str , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self : Dict , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Tuple , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Dict , UpperCamelCase__ : Vector ):
...
def __mul__( self : Union[str, Any] , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , (float, int) ):
A = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
A = len(self )
A = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase ( self : Union[str, Any] ):
return Vector(self.__components )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : float ):
assert -len(self.__components ) <= pos < len(self.__components )
A = value
def UpperCamelCase ( self : str ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
A = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ):
A = self * other
A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __UpperCamelCase (lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase )
return Vector([0] * dimension )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (isinstance(lowerCAmelCase, lowerCAmelCase ))
A = [0] * dimension
A = 1
return Vector(lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : float, lowerCAmelCase : Vector, lowerCAmelCase : Vector ) -> Vector:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and isinstance(lowerCAmelCase, lowerCAmelCase )
and (isinstance(lowerCAmelCase, (int, float) ))
)
return x * scalar + y
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
random.seed(lowerCAmelCase )
A = [random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
A = matrix
A = w
A = h
def __str__( self : int ):
A = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self : Dict , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : int , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Union[str, Any] , UpperCamelCase__ : Vector ):
...
def __mul__( self : Tuple , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A = zero_vector(self.__height )
for i in range(self.__height ):
A = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def UpperCamelCase ( self : Optional[int] ):
return self.__height
def UpperCamelCase ( self : List[Any] ):
return self.__width
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase ( self : Tuple ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def __UpperCamelCase (lowerCAmelCase : int ) -> Matrix:
A = [[0] * n for _ in range(lowerCAmelCase )]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Matrix:
random.seed(lowerCAmelCase )
A = [
[random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )
]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
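# --- Hedged numeric check (added) of the angle formula used by the Vector
# class above: angle(x, y) = acos((x . y) / (|x| * |y|)); orthogonal
# vectors give 90 degrees.
import math as _math

_x, _y = (1.0, 0.0), (0.0, 2.0)
_dot = sum(a * b for a, b in zip(_x, _y))
_den = _math.hypot(*_x) * _math.hypot(*_y)
assert _math.isclose(_math.degrees(_math.acos(_dot / _den)), 90.0)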
| 699 | 1 |
from math import sqrt
def __UpperCamelCase (lowerCAmelCase : int ) -> bool:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
A = True
# 0 and 1 are none primes.
if number <= 1:
A = False
for divisor in range(2, int(round(sqrt(lowerCAmelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
A = False
break
# precondition
assert isinstance(lowerCAmelCase, lowerCAmelCase ), "'status' must been from type bool"
return status
def __UpperCamelCase (lowerCAmelCase : Any ) -> str:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
A = list(range(2, n + 1 ) )
A = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase ) ):
for j in range(i + 1, len(lowerCAmelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
A = 0
# filters actual prime numbers.
A = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase, lowerCAmelCase ), "'ans' must been from type list"
return ans
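# --- Hedged standalone check (added) mirroring the sieve above: crossing
# out multiples leaves exactly the primes up to N.
_nums = list(range(2, 11))
for _i in range(len(_nums)):
    for _j in range(_i + 1, len(_nums)):
        if _nums[_i] != 0 and _nums[_j] % _nums[_i] == 0:
            _nums[_j] = 0
assert [x for x in _nums if x != 0] == [2, 3, 5, 7]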
def __UpperCamelCase (lowerCAmelCase : List[str] ) -> Dict:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"
A = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2, n + 1 ):
if is_prime(lowerCAmelCase ):
ans.append(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase, lowerCAmelCase ), "'ans' must been from type list"
return ans
def __UpperCamelCase (lowerCAmelCase : Tuple ) -> Optional[int]:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and number >= 0, "'number' must been an int and >= 0"
A = [] # this list will be returns of the function.
# potential prime number factors.
A = 2
A = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase ):
while quotient != 1:
if is_prime(lowerCAmelCase ) and (quotient % factor == 0):
ans.append(lowerCAmelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase, lowerCAmelCase ), "'ans' must been from type list"
return ans
def __UpperCamelCase (lowerCAmelCase : Any ) -> Any:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
A = 0
# prime factorization of 'number'
A = prime_factorization(lowerCAmelCase )
A = max(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase, lowerCAmelCase ), "'ans' must been from type int"
return ans
def __UpperCamelCase (lowerCAmelCase : Tuple ) -> Union[str, Any]:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
A = 0
# prime factorization of 'number'
A = prime_factorization(lowerCAmelCase )
A = min(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase, lowerCAmelCase ), "'ans' must been from type int"
return ans
def __UpperCamelCase (lowerCAmelCase : str ) -> int:
assert isinstance(lowerCAmelCase, lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0, lowerCAmelCase ), "compare bust been from type bool"
return number % 2 == 0
def __UpperCamelCase (lowerCAmelCase : int ) -> Any:
assert isinstance(lowerCAmelCase, lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0, lowerCAmelCase ), "compare bust been from type bool"
return number % 2 != 0
def __UpperCamelCase (lowerCAmelCase : List[str] ) -> List[str]:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase ) and (number > 2) and is_even(lowerCAmelCase )
), "'number' must been an int, even and > 2"
A = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
A = get_prime_numbers(lowerCAmelCase )
A = len(lowerCAmelCase )
# run variable for while-loops.
A = 0
A = None
# exit variable. for break up the loops
A = True
while i < len_pn and loop:
A = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
A = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and (len(lowerCAmelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and isinstance(lowerCAmelCase, lowerCAmelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
A = 0
while numbera != 0:
A = numbera % numbera
A = numbera
A = rest
# precondition
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : List[str] ) -> List[Any]:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and isinstance(lowerCAmelCase, lowerCAmelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
    A = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
A = prime_factorization(lowerCAmelCase )
A = prime_factorization(lowerCAmelCase )
elif numbera == 1 or numbera == 1:
A = []
A = []
A = max(lowerCAmelCase, lowerCAmelCase )
A = 0
A = 0
    A = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
A = prime_fac_a.count(lowerCAmelCase )
A = prime_fac_a.count(lowerCAmelCase )
for _ in range(max(lowerCAmelCase, lowerCAmelCase ) ):
ans *= n
else:
A = prime_fac_a.count(lowerCAmelCase )
for _ in range(lowerCAmelCase ):
ans *= n
done.append(lowerCAmelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
A = prime_fac_a.count(lowerCAmelCase )
for _ in range(lowerCAmelCase ):
ans *= n
done.append(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
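# Illustrative sketch (added): the prime-factorisation route above is equivalent
# to the classic identity lcm(a, b) * gcd(a, b) == a * b; math.gcd is used here
# to avoid any name clash with the gcd defined in this module.
import math

def lcm_sketch(a: int, b: int) -> int:
    return a * b // math.gcd(a, b)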
def __UpperCamelCase (lowerCAmelCase : str ) -> Tuple:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (n >= 0), "'number' must been a positive int"
A = 0
A = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and is_prime(
lowerCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def __UpperCamelCase (lowerCAmelCase : Tuple, lowerCAmelCase : Optional[int] ) -> Dict:
assert (
is_prime(lowerCAmelCase ) and is_prime(lowerCAmelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
A = p_number_a + 1 # jump to the next number
    A = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def __UpperCamelCase (lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (n >= 1), "'n' must been int and >= 1"
A = [] # will be returned.
for divisor in range(1, n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase )
# precondition
    assert ans[0] == 1 and ans[len(lowerCAmelCase ) - 1] == n, "Error in function getDivisors(...)"
return ans
def __UpperCamelCase (lowerCAmelCase : Dict ) -> Union[str, Any]:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
A = get_divisors(lowerCAmelCase )
# precondition
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
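# Illustrative sketch (added): a brute-force cross-check of the perfect-number
# predicate above; 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are the first two
# perfect numbers.
def perfect_numbers_up_to(limit: int) -> list[int]:
    return [
        m
        for m in range(2, limit + 1)
        if sum(d for d in range(1, m) if m % d == 0) == m
    ]
# perfect_numbers_up_to(30) -> [6, 28]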
def __UpperCamelCase (lowerCAmelCase : Union[str, Any], lowerCAmelCase : Optional[int] ) -> List[str]:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and isinstance(lowerCAmelCase, lowerCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
A = gcd(abs(lowerCAmelCase ), abs(lowerCAmelCase ) )
# precondition
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __UpperCamelCase (lowerCAmelCase : str ) -> Optional[int]:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"
    A = 1 # this will be returned.
for factor in range(1, n + 1 ):
ans *= factor
return ans
def __UpperCamelCase (lowerCAmelCase : str ) -> Optional[Any]:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"
A = 0
A = 1
    A = 1 # this will be returned
for _ in range(n - 1 ):
A = ans
ans += fiba
A = tmp
return ans
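# Illustrative sketch (added): the Fibonacci loop above, rewritten with tuple
# assignment. Assumed semantics, matching the seed values in the row:
# fib(0) == fib(1) == 1, fib(2) == 2, fib(3) == 3, fib(4) == 5, ...
def fib_sketch(n: int) -> int:
    previous, current = 1, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return current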
| 699 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any]=50265 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Optional[int]=2048 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : List[str] , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
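# Usage sketch (added; assumes the config class above is the one shipped as
# transformers.BlenderbotSmallConfig). The attribute_map lets the generic
# num_attention_heads attribute resolve to encoder_attention_heads.
from transformers import BlenderbotSmallConfig

config = BlenderbotSmallConfig(d_model=512, encoder_layers=8, decoder_layers=8)
print(config.model_type)           # "blenderbot-small"
print(config.num_attention_heads)  # 16, mapped to encoder_attention_heads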
| 699 | 1 |
from pathlib import Path
import numpy as np
from PIL import Image
def __UpperCamelCase (lowerCAmelCase : np.ndarray ) -> np.ndarray:
A , A , A = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def __UpperCamelCase (lowerCAmelCase : np.ndarray ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def __UpperCamelCase (lowerCAmelCase : np.ndarray, lowerCAmelCase : np.ndarray ) -> np.ndarray:
A = np.zeros_like(lowerCAmelCase )
A = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
A = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
A = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
A = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
_UpperCAmelCase = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
_UpperCAmelCase = np.array(Image.open(lena_path))
# kernel to be applied
_UpperCAmelCase = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_UpperCAmelCase = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_UpperCAmelCase = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
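# Cross-check sketch (added; assumes SciPy is available): on a binary image,
# scipy.ndimage.binary_dilation with the same cross-shaped structuring element
# should agree with the hand-rolled dilation above. Dilating a single pixel
# with a cross kernel lights up the cross shape.
import numpy as np
from scipy.ndimage import binary_dilation

point = np.zeros((5, 5), dtype=bool)
point[2, 2] = True
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)
print(binary_dilation(point, structure=cross).astype(int))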
| 699 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
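# Usage sketch (added; assumes this processor is published on the Hub under
# "BridgeTower/bridgetower-base" and that images come first positionally, as in
# the __call__ above):
from transformers import BridgeTowerProcessor
from PIL import Image

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
inputs = processor(Image.new("RGB", (224, 224)), "a photo", return_tensors="pt")
print(list(inputs.keys()))  # typically input_ids, attention_mask, pixel_values, pixel_mask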
| 699 | 1 |
def __UpperCamelCase (lowerCAmelCase : int ) -> "list[int]":
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
A = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A = 1
if upper_limit > 0:
A = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j = 0 .. i-1
for i in range(2, upper_limit + 1 ):
for j in range(lowerCAmelCase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
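# Cross-check sketch (added): the closed form C(n) = binom(2n, n) / (n + 1)
# should match the dynamic-programming table built above.
from math import comb

def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)
# catalan_closed_form(5) == 42 == catalan_numbers(5)[-1]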
| 699 |
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> str:
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 699 | 1 |
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Optional[int]:
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCAmelCase, int(b / 2 ) ) * actual_power(lowerCAmelCase, int(b / 2 ) )
else:
return a * actual_power(lowerCAmelCase, int(b / 2 ) ) * actual_power(lowerCAmelCase, int(b / 2 ) )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> float:
if b < 0:
return 1 / actual_power(lowerCAmelCase, lowerCAmelCase )
return actual_power(lowerCAmelCase, lowerCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
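# Added sketch: actual_power above recomputes the half power twice, giving O(b)
# multiplications; an iterative square-and-multiply version stays O(log b).
def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    result = 1
    while b:
        if b & 1:
            result *= a
        a *= a
        b >>= 1
    return result
# fast_power(-2, -3) == power(-2, -3) == -0.125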
| 699 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCAmelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 2_000.0 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2048 , UpperCamelCase__ : float = 0.1 , ):
super().__init__()
A = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = False
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : int ):
A = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
A , A , A = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A = self.conditioning_emb(UpperCamelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A = torch.broadcast_to(
torch.arange(UpperCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A = self.position_encoding(UpperCamelCase__ )
A = self.continuous_inputs_projection(UpperCamelCase__ )
inputs += position_encodings
A = self.dropout(UpperCamelCase__ )
# decoder: No padding present.
A = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A = [(x, self.encoder_decoder_mask(UpperCamelCase__ , UpperCamelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A = lyr(
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )[0]
A = self.decoder_norm(UpperCamelCase__ )
A = self.post_dropout(UpperCamelCase__ )
A = self.spec_out(UpperCamelCase__ )
return spec_out
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=1e-6 ):
super().__init__()
A = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=None , ):
A = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
A = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
A = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
super().__init__()
A = TaLayerNorm(UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , ):
# pre_self_attention_layer_norm
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A = self.attention(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , ):
A = self.layer_norm(UpperCamelCase__ )
A = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
super().__init__()
A = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=None ):
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.film(UpperCamelCase__ , UpperCamelCase__ )
A = self.DenseReluDense(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
A = NewGELUActivation()
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
A = self.act(self.wi_a(UpperCamelCase__ ) )
A = self.wi_a(UpperCamelCase__ )
A = hidden_gelu * hidden_linear
A = self.dropout(UpperCamelCase__ )
A = self.wo(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=1e-6 ):
super().__init__()
A = nn.Parameter(torch.ones(UpperCamelCase__ ) )
A = eps
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : int ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
A = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase__ )
A = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def UpperCamelCase ( self : Any , UpperCamelCase__ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase__ , 3.0 )) ))
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , out_features * 2 , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
A = self.scale_bias(UpperCamelCase__ )
A , A = torch.chunk(UpperCamelCase__ , 2 , -1 )
A = x * (1 + scale) + shift
return x
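# Standalone sketch (added) of the FiLM conditioning used throughout this
# decoder: a conditioning vector predicts a per-channel (scale, shift) pair
# that modulates the hidden states as x * (1 + scale) + shift, mirroring the
# TaFiLMLayer above (the bias-free projection is an assumption).
class FiLMSketch(nn.Module):
    def __init__(self, cond_dim: int, channels: int):
        super().__init__()
        self.proj = nn.Linear(cond_dim, channels * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = self.proj(cond).chunk(2, dim=-1)
        return x * (1 + scale) + shift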
| 699 | 1 |
import os
from distutils.util import strtobool
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : List[Any] ) -> int:
for e in env_keys:
A = int(os.environ.get(lowerCAmelCase, -1 ) )
if val >= 0:
return val
return default
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Tuple=False ) -> int:
A = os.environ.get(lowerCAmelCase, str(lowerCAmelCase ) )
return strtobool(lowerCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def __UpperCamelCase (lowerCAmelCase : Dict, lowerCAmelCase : List[Any]="no" ) -> int:
A = os.environ.get(lowerCAmelCase, str(lowerCAmelCase ) )
return value
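# Usage sketch (added). The three helpers above had their names mangled by the
# dataset; this assumes they correspond to Accelerate's get_int_from_env,
# parse_flag_from_env and parse_choice_from_env. Variable names are hypothetical.
if __name__ == "__main__":
    os.environ["DEMO_NUM_WORKERS"] = "4"
    os.environ["DEMO_VERBOSE"] = "yes"
    print(get_int_from_env(["DEMO_NUM_WORKERS", "NUM_WORKERS"], 1))  # 4
    print(parse_flag_from_env("DEMO_VERBOSE"))                       # True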
| 699 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
A = WATERMARK_BITS
A = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor ):
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
A = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A = [self.encoder.encode(UpperCamelCase__ , 'dwtDct' ) for image in images]
A = torch.from_numpy(np.array(UpperCamelCase__ ) ).permute(0 , 3 , 1 , 2 )
A = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
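# Companion decode sketch (added; assumes imwatermark's WatermarkDecoder API,
# wm_type "bits" with the length given in bits): recovers the embedded bit
# pattern from a single uint8 image array.
from imwatermark import WatermarkDecoder

def decode_watermark_bits(image_uint8: np.ndarray) -> list:
    decoder = WatermarkDecoder("bits", len(WATERMARK_BITS))
    return list(decoder.decode(image_uint8, "dwtDct"))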
| 699 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __UpperCamelCase () -> Optional[int]:
A = argparse.ArgumentParser()
parser.add_argument(
'-m', '--pretrained_model_name_or_path', type=lowerCAmelCase, default=lowerCAmelCase, required=lowerCAmelCase, help='Path to pretrained model or model identifier from huggingface.co/models.', )
parser.add_argument(
'-c', '--caption', type=lowerCAmelCase, default='robotic cat with wings', help='Text used to generate images.', )
parser.add_argument(
        '-n', '--images_num', type=lowerCAmelCase, default=4, help='How many images to generate.', )
parser.add_argument(
'-s', '--seed', type=lowerCAmelCase, default=42, help='Seed for random process.', )
parser.add_argument(
'-ci', '--cuda_id', type=lowerCAmelCase, default=0, help='cuda_id.', )
A = parser.parse_args()
return args
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[Any], lowerCAmelCase : str ) -> Dict:
if not len(lowerCAmelCase ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
A , A = imgs[0].size
A = Image.new('RGB', size=(cols * w, rows * h) )
A , A = grid.size
for i, img in enumerate(lowerCAmelCase ):
grid.paste(lowerCAmelCase, box=(i % cols * w, i // cols * h) )
return grid
def __UpperCamelCase (lowerCAmelCase : Optional[Any], lowerCAmelCase : int="robotic cat with wings", lowerCAmelCase : Optional[Any]=7.5, lowerCAmelCase : List[Any]=50, lowerCAmelCase : int=1, lowerCAmelCase : Optional[Any]=42, ) -> List[Any]:
A = torch.Generator(pipeline.device ).manual_seed(lowerCAmelCase )
A = pipeline(
lowerCAmelCase, guidance_scale=lowerCAmelCase, num_inference_steps=lowerCAmelCase, generator=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, ).images
A = int(math.sqrt(lowerCAmelCase ) )
A = image_grid(lowerCAmelCase, rows=_rows, cols=num_images_per_prompt // _rows )
return grid, images
_UpperCAmelCase = parse_args()
# Load models and create wrapper for stable diffusion
_UpperCAmelCase = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
_UpperCAmelCase = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
_UpperCAmelCase = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
_UpperCAmelCase = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
_UpperCAmelCase = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
_UpperCAmelCase = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
_UpperCAmelCase = unet.to(torch.device("cuda", args.cuda_id))
_UpperCAmelCase = pipeline.to(unet.device)
_UpperCAmelCase , _UpperCAmelCase = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
_UpperCAmelCase = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 699 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : str ) -> int:
for attribute in key.split('.' ):
A = getattr(lowerCAmelCase, lowerCAmelCase )
if weight_type is not None:
A = getattr(lowerCAmelCase, lowerCAmelCase ).shape
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Dict:
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase )[0].split('.' )[-2]
A = mapped_key.replace('*', lowerCAmelCase )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : List[Any], lowerCAmelCase : int ) -> Dict:
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any]=None, lowerCAmelCase : str=None, lowerCAmelCase : List[Any]=True ) -> Union[str, Any]:
if config_path is not None:
A = UniSpeechSatConfig.from_pretrained(lowerCAmelCase )
else:
A = UniSpeechSatConfig()
A = ''
if is_finetuned:
A = UniSpeechSatForCTC(lowerCAmelCase )
else:
A = UniSpeechSatForPreTraining(lowerCAmelCase )
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
A = model[0].eval()
recursively_load_weights(lowerCAmelCase, lowerCAmelCase )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 699 | 1 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : List[Any], lowerCAmelCase : Tuple ) -> Union[str, Any]:
# Construct model
if openai_config_file == "":
A = OpenAIGPTConfig()
else:
A = OpenAIGPTConfig.from_json_file(lowerCAmelCase )
A = OpenAIGPTModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_openai_gpt(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# Save pytorch-model
A = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
A = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict(), lowerCAmelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCAmelCase, 'w', encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
_UpperCAmelCase = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
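# Example invocation sketch (added; the script filename and paths are placeholders):
#   python convert_openai_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch \
#       --openai_config_file /path/to/config.json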
| 699 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : T ):
A = data
A = None
def __str__( self : Optional[int] ):
return f'''{self.data}'''
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple ):
A = None
def __iter__( self : int ):
A = self.top
while node:
yield node.data
A = node.next
def __str__( self : Any ):
return "->".join([str(UpperCamelCase__ ) for item in self] )
def __len__( self : Dict ):
return len(tuple(iter(self ) ) )
def UpperCamelCase ( self : List[str] ):
return self.top is None
def UpperCamelCase ( self : Dict , UpperCamelCase__ : T ):
A = Node(UpperCamelCase__ )
if not self.is_empty():
A = self.top
A = node
def UpperCamelCase ( self : Dict ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , UpperCamelCase__ )
A = self.top
A = self.top.next
return pop_node.data
def UpperCamelCase ( self : List[str] ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def UpperCamelCase ( self : List[str] ):
A = None
if __name__ == "__main__":
from doctest import testmod
testmod()
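# Usage sketch (added). The class and method names above were mangled; this
# assumes the conventional names of the original linked-list stack
# (Stack, push, pop, peek):
if __name__ == "__main__":
    stack = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)         # 3->2->1
    print(stack.pop())   # 3
    print(stack.peek())  # 2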
| 699 | 1 |