code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_snake_case = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 343 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
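# A hypothetical invocation of the CLI above (paths are placeholders, not from the original source):
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path ./xlm_checkpoint.pth \
#       --pytorch_dump_folder_path ./xlm-pytorch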
| 343 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 343 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
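# A minimal usage sketch of the processor above (assumes the public Salesforce/blip-image-captioning-base checkpoint):
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")  # pixel_values + input_ids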
| 343 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
| 343 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
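# A minimal usage sketch of the two functions above (an assumption for illustration, separate from the demo driver):
#   sample = np.array([3.0, -1.0, 2.5, 0.0])
#   comparisons = _in_place_quick_sort(sample, 0, len(sample) - 1)
#   # sample is now sorted in place; comparisons counts the element comparisons performed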
| 343 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
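# The builder above is normally reached through the standard public API; a minimal usage sketch:
#   from datasets import load_dataset
#   dset = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")  # extra kwargs flow into CsvConfig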
| 343 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
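# A minimal usage sketch (ASTConfig/ASTModel are the real transformers classes this config belongs to):
#   from transformers import ASTConfig, ASTModel
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   model = ASTModel(config)  # randomly initialized AST encoder with this configuration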
| 343 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # only the value False survives in the source; the attribute name below is an assumption
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 343 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 343 | 1 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
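# Quick illustration of the two helpers above (the values shown are what the code actually produces):
#   get_pairs(("l", "o", "w"))  ->  {("l", "o"), ("o", "w")}
#   bytes_to_unicode()[32]      ->  "Ġ"  (the printable stand-in GPT-2-style BPE uses for the space byte)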
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
| 343 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 343 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 343 |
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:  # note: unreachable, since every multiple of 15 is already a multiple of 3
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 343 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> List[Any]:
_A : Tuple = parent
_A : Any = batch_size
_A : int = image_size
_A : Tuple = num_channels
_A : List[Any] = num_stages
_A : Any = hidden_sizes
_A : Union[str, Any] = depths
_A : Union[str, Any] = is_training
_A : Tuple = use_labels
_A : Optional[Any] = intermediate_size
_A : Union[str, Any] = hidden_act
_A : Any = num_labels
_A : List[str] = initializer_range
_A : str = out_features
_A : int = out_indices
_A : List[Any] = scope
def a__ ( self ) -> str:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.num_labels )
_A : str = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> int:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
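        # e.g. with the defaults above (batch_size=13, image_size=32, hidden_sizes[-1]=40),
        # the asserted shape works out to (13, 40, 1, 1)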
def a__ ( self , _a , _a , _a ) -> List[Any]:
_A : Union[str, Any] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> str:
_A : List[str] = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Optional[Any] = None
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> int:
_A : int = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : int = ConvNextModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> List[str]:
_A : Optional[int] = ConvNextModelTester(self )
| 343 | 1 |
import base64
def lowerCAmelCase_ ( snake_case_ ):
    return base64.a85encode(snake_case_.encode("""utf-8""" ) )
def lowerCAmelCase_ ( snake_case_ ):
    return base64.a85decode(snake_case_ ).decode("""utf-8""" )
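# A minimal round-trip sketch (illustrative only; note that the second
# definition of lowerCAmelCase_ shadows the first, so bind the encoder to a
# separate name before the redefinition if both directions are needed):
#   encoded = base64.a85encode("hello".encode("utf-8"))
#   decoded = base64.a85decode(encoded).decode("utf-8")
#   assert decoded == "hello"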
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase :
_a = LEDConfig
_a = {}
_a = "gelu"
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=4 , ) -> int:
_A : List[Any] = parent
_A : Optional[Any] = batch_size
_A : List[Any] = seq_length
_A : str = is_training
_A : List[Any] = use_labels
_A : Optional[Any] = vocab_size
_A : str = hidden_size
_A : Union[str, Any] = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : Union[str, Any] = intermediate_size
_A : Union[str, Any] = hidden_dropout_prob
_A : Dict = attention_probs_dropout_prob
_A : Any = max_position_embeddings
_A : Optional[int] = eos_token_id
_A : int = pad_token_id
_A : Optional[Any] = bos_token_id
_A : Optional[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_A : Tuple = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_A : Dict = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
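        # e.g. with the defaults above (seq_length=7, attention_window=4) this gives
        # encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8, i.e. inputs padded up to the
        # next multiple of the attention window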
def a__ ( self ) -> int:
_A : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_A : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_A : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_A : Tuple = prepare_led_inputs_dict(_a , _a , _a )
_A : Dict = tf.concat(
[tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , )
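        # i.e. global attention on the last token only: zeros everywhere except a one
        # at the final position of each sequence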
_A : Optional[int] = global_attention_mask
return config, inputs_dict
def a__ ( self , _a , _a ) -> int:
_A : Optional[int] = TFLEDModel(config=_a ).get_decoder()
_A : Dict = inputs_dict["""input_ids"""]
_A : List[str] = input_ids[:1, :]
_A : Optional[int] = inputs_dict["""attention_mask"""][:1, :]
_A : List[Any] = 1
# first forward pass
_A : Optional[int] = model(_a , attention_mask=_a , use_cache=_a )
_A , _A : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_A : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _A : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_A : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
_A : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_A : Dict = model(_a , attention_mask=_a )[0]
_A : List[str] = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_A : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_A : Any = output_from_no_past[:, -3:, random_slice_idx]
_A : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=None,snake_case_=None,snake_case_=None,snake_case_=None,):
if attention_mask is None:
        _A : Optional[Any] = tf.cast(tf.math.not_equal(snake_case_,config.pad_token_id ),tf.int8 )
if decoder_attention_mask is None:
_A : List[Any] = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape,dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:],config.pad_token_id ),tf.int8 ),
],axis=-1,)
if head_mask is None:
_A : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_A : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_a = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_a = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = TFLEDModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a )
def a__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
def a__ ( self ) -> int:
_A , _A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[Any] = tf.zeros_like(inputs_dict["""attention_mask"""] )
_A : Dict = 2
_A : Union[str, Any] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
_A : Union[str, Any] = True
_A : int = self.model_tester.seq_length
_A : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_a ):
_A : Any = outputs.decoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_a ):
_A : int = [t.numpy() for t in outputs.encoder_attentions]
_A : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_A : Optional[int] = True
_A : List[Any] = False
_A : str = False
_A : str = model_class(_a )
_A : Any = model(self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = len(_a )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
if self.is_encoder_decoder:
_A : Dict = model_class(_a )
_A : Optional[Any] = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_decoder_attentions_output(_a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_A : List[Any] = True
_A : Union[str, Any] = model_class(_a )
_A : Union[str, Any] = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
# Check attention is always last and order is fine
_A : Dict = True
_A : List[Any] = True
_A : Optional[int] = model_class(_a )
_A : Optional[int] = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
self.assertEqual(model.config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def a__ ( self ) -> str:
pass
def a__ ( self ) -> Dict:
# TODO: Head-masking not yet implement
pass
def lowerCAmelCase_ ( snake_case_ ):
    return tf.constant(snake_case_,dtype=tf.int32 )  # int32 assumed for token id tensors
_snake_case = 1e-4
@slow
@require_tf
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Any:
_A : Any = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
_A : List[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_A : int = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_A : Optional[int] = prepare_led_inputs_dict(model.config , _a , _a )
_A : Dict = model(**_a )[0]
_A : Dict = (1, 1024, 768)
self.assertEqual(output.shape , _a )
# change to expected output here
_A : Optional[int] = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 )
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
_A : Dict = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_A : List[Any] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_A : Optional[Any] = prepare_led_inputs_dict(model.config , _a , _a )
_A : Optional[Any] = model(**_a )[0]
_A : str = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , _a )
# change to expected output here
_A : Optional[Any] = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 , rtol=1e-3 )
| 343 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase :
_a = 42
# setable values
_a = 42
_a = 42
_a = None
@classmethod
def a__ ( cls , _a , _a , _a ) -> Tuple:
return cls(common=_a , init_noise_sigma=_a , timesteps=_a )
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in FlaxKarrasDiffusionSchedulers]
_a = 42
@property
def a__ ( self ) -> Dict:
return True
@register_to_config
    def __init__( self , _a = 1000 , _a = 0.0001 , _a = 0.02 , _a = "linear" , _a = None , _a = "fixed_small" , _a = True , _a = "epsilon" , _a = jnp.float32 , ) -> Tuple:
_A : Tuple = dtype
def a__ ( self , _a = None ) -> DDPMSchedulerState:
if common is None:
_A : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
_A : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_a , init_noise_sigma=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a = None ) -> jnp.ndarray:
return sample
def a__ ( self , _a , _a , _a = () ) -> DDPMSchedulerState:
_A : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_A : Dict = (jnp.arange(0 , _a ) * step_ratio).round()[::-1]
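        # e.g. num_train_timesteps=1000 with num_inference_steps=50 gives step_ratio=20
        # and timesteps [980, 960, ..., 20, 0]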
return state.replace(
num_inference_steps=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a=None , _a=None ) -> Optional[int]:
_A : Optional[Any] = state.common.alphas_cumprod[t]
_A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
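        # i.e. the DDPM posterior variance
        # beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t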
if variance_type is None:
_A : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_A : Optional[Any] = jnp.clip(_a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_A : Any = jnp.log(jnp.clip(_a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_A : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A : str = variance
_A : Union[str, Any] = state.common.betas[t]
_A : Tuple = (predicted_variance + 1) / 2
_A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self , _a , _a , _a , _a , _a = None , _a = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A : Dict = timestep
if key is None:
_A : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_A , _A : List[str] = jnp.split(_a , sample.shape[1] , axis=1 )
else:
_A : int = None
# 1. compute alphas, betas
_A : int = state.common.alphas_cumprod[t]
_A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A : Union[str, Any] = 1 - alpha_prod_t
_A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A : Union[str, Any] = jnp.clip(_a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
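        # i.e. mu_tilde_t = sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t) * x_0
        #                 + sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * x_t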
# 6. Add noise
def random_variance():
_A : Tuple = jax.random.split(_a , num=1 )
_A : Dict = jax.random.normal(_a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_a , _a , predicted_variance=_a ) ** 0.5) * noise
_A : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_a , state=_a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return add_noise_common(state.common , _a , _a , _a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return get_velocity_common(state.common , _a , _a , _a )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
| 343 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def lowerCAmelCase_ ( snake_case_ ):
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
_A : str = key.replace(""".model.1.bias""",""".conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
_A : Dict = key.replace(""".model.1.weight""",""".conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
_A : Any = key.replace(""".model.3.bias""",""".conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
_A : Any = key.replace(""".model.3.weight""",""".conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
_A : List[str] = key.replace("""conditioner_blocks.0""","""conditioner_blocks""" )
if "prime_prior" in key:
_A : Union[str, Any] = key.replace("""prime_prior""","""encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_A : Any = key.replace(""".emb.""",""".""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""",""".codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""","""metadata_embedding.""" )
if "x_emb.emb." in key:
_A : Optional[int] = key.replace("""0.x_emb.emb""","""embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""","""encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""",""".layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""","""_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""","""encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""","""encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""","""fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""","""embed_tokens""" )
return key
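# Illustrative renames produced by the branches above (derived from the code,
# not from an actual checkpoint):
#   "prime_state_ln.weight" -> "encoder.final_layer_norm.weight"
#   "y_emb.0.weight"        -> "metadata_embedding.0.weight"
#   "prior.x_out.weight"    -> "prior.fc_proj_out.weight"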
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : int = {}
import re
_A : Optional[int] = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
_A : Optional[int] = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
_A : Optional[int] = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
_A : List[str] = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
_A : List[Any] = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
_A : Optional[int] = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
_A : Union[str, Any] = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
_A : Tuple = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
_A : Dict = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
_A : List[str] = re_encoder_block_conv_in.match(snake_case_ )
_A : List[str] = regex_match.groups()
_A : Tuple = int(groups[2] ) * 2 + int(groups[3] )
_A : Optional[int] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
_A : int = re_encoder_block_conv_in.sub(snake_case_,snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
_A : Optional[int] = re_encoder_block_resnet.match(snake_case_ )
_A : Optional[int] = regex_match.groups()
_A : Dict = int(groups[2] ) * 2 + int(groups[3] )
_A : str = {"""1""": 1, """3""": 2}[groups[-2]]
_A : List[str] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
_A : Union[str, Any] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
_A : List[Any] = prefix + resnet_block
_A : Tuple = re_encoder_block_resnet.sub(snake_case_,snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
_A : Dict = re_encoder_block_proj_out.match(snake_case_ )
_A : Union[str, Any] = regex_match.groups()
_A : int = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
_A : List[Any] = re_encoder_block_proj_out.sub(snake_case_,snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
_A : Optional[Any] = re_decoder_block_conv_out.match(snake_case_ )
_A : Dict = regex_match.groups()
_A : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_A : List[Any] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
_A : int = re_decoder_block_conv_out.sub(snake_case_,snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
_A : Optional[Any] = re_decoder_block_resnet.match(snake_case_ )
_A : List[str] = regex_match.groups()
_A : str = int(groups[2] ) * 2 + int(groups[3] ) - 2
_A : List[str] = {"""1""": 1, """3""": 2}[groups[-2]]
_A : List[Any] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
_A : List[str] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
_A : int = prefix + resnet_block
_A : List[str] = re_decoder_block_resnet.sub(snake_case_,snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
_A : List[Any] = re_decoder_block_proj_in.match(snake_case_ )
_A : Optional[Any] = regex_match.groups()
_A : Optional[int] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
_A : Dict = re_decoder_block_proj_in.sub(snake_case_,snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
_A : List[Any] = re_prior_cond_conv_out.match(snake_case_ )
_A : List[str] = regex_match.groups()
_A : str = int(groups[1] ) * 2 + int(groups[2] ) - 2
_A : Optional[int] = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
_A : Tuple = re_prior_cond_conv_out.sub(snake_case_,snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
_A : Optional[int] = re_prior_cond_resnet.match(snake_case_ )
_A : Optional[Any] = regex_match.groups()
_A : str = int(groups[1] ) * 2 + int(groups[2] ) - 2
_A : List[Any] = {"""1""": 1, """3""": 2}[groups[-2]]
_A : Any = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
_A : Dict = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
_A : Union[str, Any] = prefix + resnet_block
_A : Union[str, Any] = re_prior_cond_resnet.sub(snake_case_,snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
_A : List[Any] = re_prior_cond_proj_in.match(snake_case_ )
_A : Optional[int] = regex_match.groups()
_A : Union[str, Any] = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
_A : str = re_prior_cond_proj_in.sub(snake_case_,snake_case_ )
# keep original key
else:
_A : Any = original_key
_A : Dict = replace_key(snake_case_ )
if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(f'''failed converting {original_key} to {key}, does not match''' )
# handle missmatched shape
elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
_A : List[str] = model_state_dict[f'''{key_prefix}.{key}''']
print(f'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' )
_A : List[str] = original_key
_A : Optional[int] = original_key
_A : Optional[int] = value
return new_dict
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_=None,snake_case_=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ):
_A : Any = requests.get(f'''{PREFIX}{file}''',allow_redirects=snake_case_ )
os.makedirs(f'''{pytorch_dump_folder_path}/''',exist_ok=snake_case_ )
open(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''',"""wb""" ).write(r.content )
_A : Optional[Any] = MODEL_MAPPING[model_name.split("""/""" )[-1]]
_A : List[Any] = JukeboxConfig.from_pretrained(snake_case_ )
_A : Optional[Any] = JukeboxModel(snake_case_ )
_A : List[str] = []
_A : List[str] = {}
for i, dict_name in enumerate(snake_case_ ):
_A : List[str] = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )["""model"""]
_A : List[Any] = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
_A : Optional[int] = old_dic[k]
elif k.endswith(""".w""" ):
_A : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_A : Union[str, Any] = old_dic[k]
else:
_A : List[Any] = old_dic[k]
_A : Dict = """vqvae""" if i == 0 else f'''priors.{3 - i}'''
_A : Union[str, Any] = fix_jukebox_keys(snake_case_,model.state_dict(),snake_case_,snake_case_ )
weight_dict.append(snake_case_ )
_A : int = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'''{pytorch_dump_folder_path}/mapping.json''',"""w""" ) as txtfile:
json.dump(snake_case_,snake_case_ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 343 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=0 ):
# Format the message.
if name is None:
_A : Union[str, Any] = None
else:
_A : Dict = """.""" * max(0,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
_A : Tuple = fmt.format(snake_case_ )
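    # e.g. spaces=4 yields the format string "..# {:46s}": a dotted indent
    # followed by a fixed-width column for the key name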
# Print and recurse (if needed).
if isinstance(snake_case_,snake_case_ ):
if msg is not None:
print(snake_case_ )
for k in val.keys():
recursive_print(snake_case_,val[k],spaces + 2 )
elif isinstance(snake_case_,torch.Tensor ):
print(snake_case_,""":""",val.size() )
else:
print(snake_case_,""":""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
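    # Shape walk-through (illustrative): for checkpoint_version >= 2.0 with
    # num_splits=3, num_heads=2, hidden_size=4, a [24, ...] tensor is viewed as
    # [2, 3, 4, ...], transposed to [3, 2, 4, ...], and flattened back to
    # [24, ...], i.e. reordered from (head, split, hidden) to (split, head, hidden).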
_A : str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_A : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_A : Tuple = param.view(*snake_case_ )
_A : Any = param.transpose(0,2 )
_A : int = param.transpose(1,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_A : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_A : int = param.view(*snake_case_ )
_A : Any = param.transpose(0,1 ).contiguous()
_A : Optional[int] = param.view(*snake_case_ )
return param
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# The converted output model.
_A : Any = {}
# old versions did not store training args
_A : str = input_state_dict.get("""args""",snake_case_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
            _A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.float16 ) ).view(
1,1,snake_case_,snake_case_ )
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
            _A : List[str] = torch.tensor(-1e4,dtype=torch.float16 )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
def lowerCAmelCase_ ( ):
# Create the argument parser.
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""",action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""",type=snake_case_,help="""Path to the checkpoint file (.zip archive or direct .pt file)""",)
parser.add_argument(
"""--config_file""",default="""""",type=snake_case_,help="""An optional config json file describing the pre-trained model.""",)
_A : Optional[int] = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_A : Optional[Any] = convert_megatron_checkpoint(snake_case_,snake_case_,snake_case_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(snake_case_,snake_case_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_A : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_A : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_A : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_A : Optional[Any] = """gpt2"""
_A : List[str] = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = type(snake_case_ ).__name__
_A : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(snake_case_ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(snake_case_ )
# Store the state_dict to file.
_A : Union[str, Any] = os.path.join(snake_case_,"""pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(snake_case_,snake_case_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 343 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(UpperCamelCase__ )
class lowercase :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
                F'''There should be as many titles as texts but got {len(_a )} titles and {len(_a )} texts.''' )
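        # First encode "<question> [SEP] <title>" pairs (with special tokens), then the
        # passage texts on their own (without special tokens); the two id lists are
        # concatenated below and truncated to max_length when truncation is requested.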
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
        _A : List[DPRSpanPrediction] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
_A : Tuple = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _A : Tuple = sorted(_a , key=lambda _a : _a[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
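# A minimal, self-contained sketch of the same span-selection idea as the method
# above (illustrative only; `pick_spans` and the toy logits are not part of this
# class): score every (start, end) window up to the allowed length, sort by score,
# and keep a span only if it neither contains nor is contained by a chosen one.
def pick_spans(start_logits, end_logits, max_answer_length, top_spans):
    scored = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scored.append(((s, s + length), s_score + e_score))
    chosen = []
    for (s, e), _ in sorted(scored, key=lambda item: item[1], reverse=True):
        if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen):
            continue  # nested inside (or around) a higher-scoring span
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen
assert pick_spans([0.1, 0.9, 0.2], [0.2, 0.8, 0.3], max_answer_length=2, top_spans=1) == [(1, 1)]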
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ["input_ids", "attention_mask"]
| 343 | 1 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class lowercase :
def __init__( self , _a , _a=3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> str:
_A : List[Any] = parent
_A : int = batch_size
_A : Tuple = seq_length
_A : List[Any] = is_training
_A : List[Any] = use_input_mask
_A : List[str] = use_token_type_ids
_A : str = use_labels
_A : int = vocab_size
_A : Optional[Any] = hidden_size
_A : Tuple = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : Union[str, Any] = intermediate_size
_A : Any = hidden_act
_A : Tuple = hidden_dropout_prob
_A : List[str] = attention_probs_dropout_prob
_A : Optional[int] = max_position_embeddings
_A : List[str] = type_vocab_size
_A : Tuple = type_sequence_label_size
_A : Union[str, Any] = initializer_range
_A : Union[str, Any] = num_labels
_A : Tuple = num_choices
_A : int = scope
def a__ ( self ) -> Tuple:
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Optional[Any] = None
if self.use_input_mask:
_A : int = random_attention_mask([self.batch_size, self.seq_length] )
_A : Dict = None
_A : List[str] = None
_A : List[Any] = None
_A : Optional[Any] = None
if self.use_labels:
_A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : Dict = ids_tensor([self.batch_size] , self.num_choices )
_A : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> Union[str, Any]:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_a , )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
_A : Optional[int] = FalconModel(config=_a )
model.to(_a )
model.eval()
_A : List[Any] = model(_a , attention_mask=_a )
_A : Any = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
_A : List[Any] = True
_A : int = FalconModel(_a )
model.to(_a )
model.eval()
_A : int = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_A : List[str] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
_A : List[str] = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[Any]:
_A : Union[str, Any] = FalconForCausalLM(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
_A : Optional[Any] = True
_A : int = True
_A : Optional[Any] = FalconForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
_A : List[str] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
_A : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
_A : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_A : Any = torch.cat([input_mask, next_mask] , dim=-1 )
_A : Dict = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
_A : Union[str, Any] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
_A : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = self.prepare_config_and_inputs()
        ((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) : Dict = config_and_inputs
_A : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_a = (FalconForCausalLM,) if is_torch_available() else ()
_a = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Dict = FalconModelTester(self )
_A : Optional[Any] = ConfigTester(self , config_class=_a , hidden_size=37 )
def a__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> Union[str, Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Optional[Any]:
_A , *_A : Any = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_A : Any = alibi
self.model_tester.create_and_check_model(_a , *_a )
def a__ ( self ) -> List[str]:
_A , _A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Dict = 3
_A : List[Any] = input_dict["""input_ids"""]
_A : List[str] = input_ids.ne(1 ).to(_a )
_A : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : Dict = FalconForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Dict = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> List[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Dict = 3
_A : Dict = """single_label_classification"""
_A : str = input_dict["""input_ids"""]
_A : Optional[int] = input_ids.ne(1 ).to(_a )
_A : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : List[Any] = FalconForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> List[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[str] = input_dict["""input_ids"""]
_A : Union[str, Any] = FalconForCausalLM(_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a , use_cache=_a )
_A : Optional[int] = input_ids.shape[0]
_A : Optional[Any] = model._convert_to_rw_cache(result.past_key_values )
_A : Any = model._convert_cache_to_standard_format(_a , _a )
for layer in range(len(_a ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def a__ ( self ) -> int:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = 3
_A : Dict = """multi_label_classification"""
_A : Dict = input_dict["""input_ids"""]
_A : Union[str, Any] = input_ids.ne(1 ).to(_a )
_A : Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_A : Optional[int] = FalconForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> Optional[int]:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_a , """use_cache""" ):
return
_A : int = model_class(_a ).to(_a )
if "use_cache" not in inputs:
_A : str = True
_A : str = model(**_a )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_A : Dict = (
getattr(_a , """decoder_layers""" , _a )
or getattr(_a , """num_decoder_layers""" , _a )
or config.num_hidden_layers
)
_A : Optional[Any] = getattr(_a , """num_kv_heads""" , config.num_attention_heads )
_A : int = getattr(_a , """d_model""" , config.hidden_size )
_A : int = embed_dim // num_attention_heads
_A : List[str] = outputs["""past_key_values"""]
self.assertEqual(len(_a ) , _a )
_A , _A : Optional[Any] = inputs["""input_ids"""].shape
for i in range(_a ):
if config.new_decoder_architecture:
_A : List[str] = config.num_attention_heads
elif config.multi_query:
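                # multi-query attention: a single key/value head is shared by all query heads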
_A : int = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> List[str]:
_A : Optional[Any] = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
_A : List[str] = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(_a )
_A : Optional[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a )
_A : Tuple = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
_A : Optional[Any] = model.generate(**_a , do_sample=_a , max_new_tokens=19 )
_A : Optional[Any] = tokenizer.batch_decode(_a )[0]
self.assertEqual(_a , _a )
@slow
def a__ ( self ) -> List[str]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_A : List[Any] = AutoTokenizer.from_pretrained(_a )
_A : Union[str, Any] = FalconForCausalLM.from_pretrained(_a )
model.eval()
model.to(_a )
_A : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_a , do_sample=_a , max_new_tokens=4 )
model.generate(**_a , do_sample=_a , max_new_tokens=4 )
model.generate(**_a , num_beams=2 , max_new_tokens=4 )
@slow
def a__ ( self ) -> List[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_A : int = AutoTokenizer.from_pretrained(_a )
_A : Any = FalconForCausalLM.from_pretrained(_a )
model.eval()
model.to(device=_a )
_A : Optional[int] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a )
# Test results are the same with and without cache
_A : int = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a )
_A : str = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 343 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ ( self ) -> List[Any]:
_A : int = ort.SessionOptions()
_A : Any = False
return options
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_A : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_A : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_A : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = """A red cat sitting on a park bench"""
_A : Optional[Any] = np.random.RandomState(0 )
_A : Dict = pipe(
prompt=_a , image=_a , mask_image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_a , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 343 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_=2,snake_case_=3,snake_case_=16,snake_case_ = 10,snake_case_ = 2 ):
def get_dataset(snake_case_ ):
_A : int = torch.randn(batch_size * n_batches,1 )
return TensorDataset(snake_case_,a * x + b + 0.1 * torch.randn(batch_size * n_batches,1 ) )
_A : int = get_dataset(snake_case_ )
_A : Union[str, Any] = get_dataset(snake_case_ )
_A : int = DataLoader(snake_case_,shuffle=snake_case_,batch_size=snake_case_,num_workers=4 )
_A : int = DataLoader(snake_case_,shuffle=snake_case_,batch_size=snake_case_,num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_=None ):
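    # Minimal training loop used by the checkpointing tests; the random draws collected
    # per step let the tests verify that a run resumed from a checkpoint reproduces the
    # exact same trajectory as an uninterrupted run.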
_A : Dict = []
for epoch in range(snake_case_ ):
# Train quickly
model.train()
for batch in dataloader:
_A , _A : Any = batch
_A : Optional[int] = model(snake_case_ )
_A : str = torch.nn.functional.mse_loss(snake_case_,snake_case_ )
accelerator.backward(snake_case_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowercase ( nn.Module ):
def __init__( self ) -> Optional[int]:
super().__init__()
_A : List[str] = nn.Parameter(torch.randn(1 ) )
_A : Union[str, Any] = nn.Parameter(torch.randn(1 ) )
def a__ ( self , _a ) -> List[str]:
return x * self.a + self.b
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A : List[Any] = DummyModel()
_A : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_A , _A : Union[str, Any] = dummy_dataloaders()
_A : str = ProjectConfiguration(total_limit=1 , project_dir=_a , automatic_checkpoint_naming=_a )
# Train baseline
_A : Union[str, Any] = Accelerator(project_config=_a )
_A , _A , _A , _A : str = accelerator.prepare(
_a , _a , _a , _a )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def a__ ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A : str = DummyModel()
_A : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_A , _A : Dict = dummy_dataloaders()
# Train baseline
_A : Tuple = Accelerator()
_A , _A , _A , _A : Union[str, Any] = accelerator.prepare(
_a , _a , _a , _a )
# Save initial
_A : Any = os.path.join(_a , """initial""" )
accelerator.save_state(_a )
((_A) , (_A)) : List[Any] = model.a.item(), model.b.item()
_A : int = optimizer.state_dict()
_A : int = train(3 , _a , _a , _a , _a )
((_A) , (_A)) : str = model.a.item(), model.b.item()
_A : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
_A : Optional[Any] = DummyModel()
_A : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_A , _A : List[Any] = dummy_dataloaders()
_A : Tuple = Accelerator()
_A , _A , _A , _A : str = accelerator.prepare(
_a , _a , _a , _a )
accelerator.load_state(_a )
((_A) , (_A)) : str = model.a.item(), model.b.item()
_A : str = optimizer.state_dict()
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
_A : int = train(2 , _a , _a , _a , _a )
# Save everything
_A : Tuple = os.path.join(_a , """checkpoint""" )
accelerator.save_state(_a )
# Load everything back in and make sure all states work
accelerator.load_state(_a )
test_rands += train(1 , _a , _a , _a , _a )
((_A) , (_A)) : Dict = model.a.item(), model.b.item()
_A : Tuple = optimizer.state_dict()
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A : Optional[Any] = DummyModel()
_A : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_A , _A : int = dummy_dataloaders()
_A : str = ProjectConfiguration(automatic_checkpoint_naming=_a )
# Train baseline
_A : str = Accelerator(project_dir=_a , project_config=_a )
_A , _A , _A , _A : Union[str, Any] = accelerator.prepare(
_a , _a , _a , _a )
# Save initial
accelerator.save_state()
((_A) , (_A)) : str = model.a.item(), model.b.item()
_A : Tuple = optimizer.state_dict()
_A : Any = train(3 , _a , _a , _a , _a )
((_A) , (_A)) : Optional[int] = model.a.item(), model.b.item()
_A : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
_A : List[str] = DummyModel()
_A : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_A , _A : Optional[int] = dummy_dataloaders()
_A : int = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_a )
_A : Tuple = Accelerator(project_dir=_a , project_config=_a )
_A , _A , _A , _A : int = accelerator.prepare(
_a , _a , _a , _a )
accelerator.load_state(os.path.join(_a , """checkpoints""" , """checkpoint_0""" ) )
((_A) , (_A)) : Tuple = model.a.item(), model.b.item()
_A : Optional[Any] = optimizer.state_dict()
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
_A : Union[str, Any] = train(2 , _a , _a , _a , _a )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_a , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , _a , _a , _a , _a )
((_A) , (_A)) : Tuple = model.a.item(), model.b.item()
_A : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
def a__ ( self ) -> List[Any]:
_A : Tuple = torch.tensor([1, 2, 3] )
_A : List[Any] = torch.tensor([2, 3, 4] )
_A : Any = DummyModel()
_A : str = torch.optim.Adam(net.parameters() )
_A : Optional[Any] = Accelerator()
with self.assertRaises(_a ) as ve:
accelerator.register_for_checkpointing(_a , _a , _a , _a )
_A : List[str] = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A : Dict = DummyModel()
_A : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_A : List[str] = torch.optim.lr_scheduler.StepLR(_a , step_size=1 , gamma=0.99 )
_A , _A : Union[str, Any] = dummy_dataloaders()
_A : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=_a )
# Train baseline
_A : Optional[Any] = Accelerator(project_dir=_a , project_config=_a )
_A , _A , _A , _A , _A : str = accelerator.prepare(
_a , _a , _a , _a , _a )
# Save initial
accelerator.save_state()
_A : Union[str, Any] = scheduler.state_dict()
train(3 , _a , _a , _a , _a , _a )
self.assertNotEqual(_a , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_a , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(_a , scheduler.state_dict() )
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_A : List[Any] = DummyModel()
_A : Dict = ProjectConfiguration(automatic_checkpoint_naming=_a , total_limit=2 )
# Train baseline
_A : List[Any] = Accelerator(project_dir=_a , project_config=_a )
_A : str = accelerator.prepare(_a )
            # Save 11 states; with total_limit=2 only the last two checkpoints should remain:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_a , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def a__ ( self ) -> Any:
_A : Union[str, Any] = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
_snake_case = "/tmp/accelerate/state_checkpointing"
_snake_case = DummyModel()
_snake_case = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_snake_case = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
_snake_case , _snake_case = dummy_dataloaders()
_snake_case = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_snake_case = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_snake_case , _snake_case = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_snake_case = group["params"][0].device
break
assert param_device.type == accelerator.device.type
_snake_case = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
_snake_case = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
_snake_case = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 343 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ ):
create_state_space_tree(snake_case_,[],0,[0 for i in range(len(snake_case_ ) )] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,):
if index == len(snake_case_ ):
print(snake_case_ )
return
for i in range(len(snake_case_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_A : Optional[Any] = True
create_state_space_tree(snake_case_,snake_case_,index + 1,snake_case_ )
current_sequence.pop()
_A : str = False
_snake_case = [3, 1, 2, 4]
generate_all_permutations(sequence)
_snake_case = ["A", "B", "C"]
generate_all_permutations(sequence_a)
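# Cross-check of the expected output size (illustrative, using the standard library):
# backtracking over n distinct items should print each of the n! orderings exactly once.
import itertools
import math
assert len(list(itertools.permutations([3, 1, 2, 4]))) == math.factorial(4) == 24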
| 343 | 1 |
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
_A : List[str] = str(bin(snake_case_ ) )[2:] # remove the leading "0b"
_A : Dict = str(bin(snake_case_ ) )[2:]
_A : Tuple = max(len(snake_case_ ),len(snake_case_ ) )
return "0b" + "".join(
str(int("""1""" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(snake_case_ ),b_binary.zfill(snake_case_ ) ) )
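# Worked example (values checked by hand, independent of the helper above):
# 25 = 0b11001 and 32 = 0b100000, so their bitwise OR is 0b111001 (i.e. 57).
assert bin(25 | 32) == "0b111001"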
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( snake_case_ ):
    _A : Tuple = filter(lambda p : p.requires_grad,model.parameters() )
_A : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if metric == "rouge2":
_A : Optional[int] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_A : Dict = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_A : List[str] = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
""" function.""" )
_A : Optional[int] = ModelCheckpoint(
dirpath=snake_case_,filename=snake_case_,monitor=f'''val_{metric}''',mode="""max""",save_top_k=3,every_n_epochs=1,)
return checkpoint_callback
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return EarlyStopping(
monitor=f'''val_{metric}''',mode="""min""" if """loss""" in metric else """max""",patience=snake_case_,verbose=snake_case_,)
class lowercase ( pl.Callback ):
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[Any] = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_a )
@rank_zero_only
def a__ ( self , _a , _a , _a , _a=True ) -> None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
_A : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_A : Dict = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A : List[Any] = od / """test_results.txt"""
_A : List[Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A : Optional[int] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
_A : int = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_a )
generations_file.parent.mkdir(exist_ok=_a )
with open(_a , """a+""" ) as writer:
for key in sorted(_a ):
if key in ["log", "progress_bar", "preds"]:
continue
_A : List[Any] = metrics[key]
if isinstance(_a , torch.Tensor ):
_A : str = val.item()
_A : str = F'''{key}: {val:.6f}\n'''
writer.write(_a )
if not save_generations:
return
if "preds" in metrics:
_A : List[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_a )
@rank_zero_only
def a__ ( self , _a , _a ) -> str:
try:
_A : int = pl_module.model.model.num_parameters()
except AttributeError:
_A : str = pl_module.model.num_parameters()
_A : Optional[int] = count_trainable_parameters(_a )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self , _a , _a ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_a , _a , """test""" )
@rank_zero_only
def a__ ( self , _a , _a ) -> Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 343 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ ):
return [ord(snake_case_ ) - 96 for elem in plain]
def lowerCAmelCase_ ( snake_case_ ):
return "".join(chr(elem + 96 ) for elem in encoded )
def lowerCAmelCase_ ( ):
_A : Optional[int] = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """,snake_case_ )
print("""Decoded:""",decode(snake_case_ ) )
if __name__ == "__main__":
main()
| 343 |
from __future__ import annotations
from collections.abc import Callable
_snake_case = list[list[float | int]]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
_A : int
_A : int
_A : int
_A : int
_A : int
_A : float
for row in range(snake_case_ ):
for col in range(snake_case_ ):
_A : Dict = matrix[row][col]
_A : List[Any] = vector[row][0]
_A : List[Any] = 0
_A : Optional[Any] = 0
while row < size and col < size:
# pivoting
_A : Any = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_,snake_case_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_A , _A : Optional[Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1,snake_case_ ):
_A : str = augmented[rowa][col] / augmented[row][col]
_A : List[Any] = 0
for cola in range(col + 1,size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1,snake_case_ ):
for row in range(snake_case_ ):
_A : int = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_,size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row],10 )] for row in range(snake_case_ )
]
def lowerCAmelCase_ ( snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
_A : Matrix = [[0] for _ in range(snake_case_ )]
_A : Matrix
_A : int
_A : int
_A : int
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
_A : str = (x_val + 1) ** (size - col - 1)
_A : List[str] = y_val
_A : Any = solve(snake_case_,snake_case_ )
    def interpolated_func(var ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
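# Illustrative numbers from the Project Euler 101 statement for u(n) = n^3:
# the line through (1, 1) and (2, 8) is y = 7n - 6, so its first incorrect term
# (FIT) is OP(2, 3) = 15; the quadratic through the first three cubes is
# 6n^2 - 11n + 6, whose FIT is OP(3, 4) = 58.
assert 7 * 3 - 6 == 15
assert 6 * 4 ** 2 - 11 * 4 + 6 == 58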
def lowerCAmelCase_ ( snake_case_ ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCAmelCase_ ( snake_case_ = question_function,snake_case_ = 10 ):
_A : list[int] = [func(snake_case_ ) for x_val in range(1,order + 1 )]
_A : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1,order + 1 )
]
_A : int = 0
_A : Callable[[int], int]
_A : int
for poly in polynomials:
_A : Optional[int] = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 1 |
from collections.abc import Callable
import numpy as np
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : str = int(np.ceil((x_end - xa) / step_size ) )
_A : Union[str, Any] = np.zeros((n + 1,) )
_A : Dict = ya
_A : List[str] = xa
for k in range(snake_case_ ):
_A : int = y[k] + step_size * ode_func(snake_case_,y[k] )
_A : Any = y[k] + (
(step_size / 2) * (ode_func(snake_case_,y[k] ) + ode_func(x + step_size,snake_case_ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
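# A self-contained sketch of the same Heun scheme (Euler predictor followed by a
# trapezoidal corrector); `heun` and `f` are illustrative names, not the code above.
# For y' = y with y(0) = 1, integrating to x = 1 should approximate e.
def heun(f, ya, xa, step_size, x_end):
    steps = int(round((x_end - xa) / step_size))
    y, x = ya, xa
    for _ in range(steps):
        pred = y + step_size * f(x, y)  # Euler predictor
        y = y + (step_size / 2) * (f(x, y) + f(x + step_size, pred))  # corrector
        x += step_size
    return y
assert abs(heun(lambda x, y: y, 1.0, 0.0, 0.001, 1.0) - 2.718281828) < 1e-3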
| 343 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
_snake_case = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowerCAmelCase_ ( snake_case_ = "mumbai" ):
_A : Optional[Any] = BeautifulSoup(requests.get(url + location ).content,"""html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""",attrs={"""data-tn-component""": """organicJob"""} ):
_A : Tuple = job.find("""a""",attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
_A : Optional[int] = job.find("""span""",{"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 343 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = 0
if start < end:
_A : Tuple = randint(snake_case_,snake_case_ )
_A : Any = a[end]
_A : int = a[pivot]
_A : int = temp
_A , _A : List[Any] = _in_place_partition(snake_case_,snake_case_,snake_case_ )
count += _in_place_quick_sort(snake_case_,snake_case_,p - 1 )
count += _in_place_quick_sort(snake_case_,p + 1,snake_case_ )
return count
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = 0
_A : List[str] = randint(snake_case_,snake_case_ )
_A : Union[str, Any] = a[end]
_A : List[str] = a[pivot]
_A : List[Any] = temp
_A : List[str] = start - 1
for index in range(snake_case_,snake_case_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_A : Union[str, Any] = new_pivot_index + 1
_A : List[Any] = a[new_pivot_index]
_A : Optional[int] = a[index]
_A : List[Any] = temp
_A : Optional[Any] = a[new_pivot_index + 1]
_A : Any = a[end]
_A : Dict = temp
return new_pivot_index + 1, count
_snake_case = TemporaryFile()
_snake_case = 100 # 100 elements are to be sorted
_snake_case , _snake_case = 0, 1 # mean and standard deviation
_snake_case = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
_snake_case = np.load(outfile)
_snake_case = len(M) - 1
_snake_case = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 343 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(snake_case_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_A : List[Any] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
_A : Tuple = [[0.0, 0.0], [0.0, 0.0]]
_A , _A : List[str] = matrix[1][1], matrix[0][0]
_A , _A : List[str] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(snake_case_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(snake_case_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_A : List[str] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
_A : List[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_A : Union[str, Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_A : Optional[Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_A : List[Any] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_A : int = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_A : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_A : List[str] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_A : Optional[int] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_A : List[Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
_A : List[str] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_A : Union[str, Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(snake_case_ )
# Calculate the inverse of the matrix
return [[float(d(snake_case_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 343 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def a__ ( *_a , **_a ) -> List[str]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_OBJECT_DETECTION_MAPPING
def a__ ( self , _a , _a , _a ) -> Dict:
_A : int = ObjectDetectionPipeline(model=_a , image_processor=_a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(_a ) , 0 )
for detected_object in outputs:
self.assertEqual(
_a , {
"""score""": ANY(_a ),
"""label""": ANY(_a ),
"""box""": {"""xmin""": ANY(_a ), """ymin""": ANY(_a ), """xmax""": ANY(_a ), """ymax""": ANY(_a )},
} , )
import datasets
_A : Tuple = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
_A : int = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
_A : Tuple = object_detector(_a , threshold=0.0 )
self.assertEqual(len(_a ) , len(_a ) )
for outputs in batch_outputs:
self.assertGreater(len(_a ) , 0 )
for detected_object in outputs:
self.assertEqual(
_a , {
"""score""": ANY(_a ),
"""label""": ANY(_a ),
"""box""": {"""xmin""": ANY(_a ), """ymin""": ANY(_a ), """xmax""": ANY(_a ), """ymax""": ANY(_a )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def a__ ( self ) -> Optional[int]:
pass
@require_torch
def a__ ( self ) -> List[Any]:
_A : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
_A : Dict = AutoModelForObjectDetection.from_pretrained(_a )
_A : List[str] = AutoFeatureExtractor.from_pretrained(_a )
_A : Optional[int] = ObjectDetectionPipeline(model=_a , feature_extractor=_a )
_A : List[str] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
_A : int = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def a__ ( self ) -> str:
_A : Dict = """facebook/detr-resnet-50"""
_A : Optional[Any] = AutoModelForObjectDetection.from_pretrained(_a )
_A : List[str] = AutoFeatureExtractor.from_pretrained(_a )
_A : Optional[Any] = ObjectDetectionPipeline(model=_a , feature_extractor=_a )
_A : int = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
_A : Any = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def a__ ( self ) -> Optional[Any]:
_A : Tuple = """facebook/detr-resnet-50"""
_A : List[Any] = pipeline("""object-detection""" , model=_a )
_A : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
_A : Optional[int] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def a__ ( self ) -> Optional[int]:
_A : str = 0.9985
_A : List[Any] = """facebook/detr-resnet-50"""
_A : Optional[Any] = pipeline("""object-detection""" , model=_a )
_A : List[str] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=_a )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def a__ ( self ) -> Optional[int]:
_A : Any = """Narsil/layoutlmv3-finetuned-funsd"""
_A : Any = 0.9993
_A : Union[str, Any] = pipeline("""object-detection""" , model=_a , threshold=_a )
_A : int = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , )
| 343 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
@register_to_config
def __init__( self , _a = 32 , _a = 64 , _a = 20 , _a = 768 , _a=77 , _a=4 , _a = 0.0 , _a = "silu" , _a = None , _a = None , _a = "linear" , _a = "prd" , _a = None , _a = None , _a = None , ) -> Any:
super().__init__()
_A : int = num_attention_heads
_A : Union[str, Any] = attention_head_dim
_A : Tuple = num_attention_heads * attention_head_dim
_A : Any = additional_embeddings
_A : Any = time_embed_dim or inner_dim
_A : List[str] = embedding_proj_dim or embedding_dim
_A : Optional[int] = clip_embed_dim or embedding_dim
_A : Union[str, Any] = Timesteps(_a , _a , 0 )
_A : str = TimestepEmbedding(_a , _a , out_dim=_a , act_fn=_a )
_A : Dict = nn.Linear(_a , _a )
if embedding_proj_norm_type is None:
_A : int = None
elif embedding_proj_norm_type == "layer":
_A : Optional[Any] = nn.LayerNorm(_a )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
_A : Optional[Any] = nn.Linear(_a , _a )
if encoder_hid_proj_type is None:
_A : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
_A : Tuple = nn.Linear(_a , _a )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
_A : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _a ) )
if added_emb_type == "prd":
_A : str = nn.Parameter(torch.zeros(1 , 1 , _a ) )
elif added_emb_type is None:
_A : Union[str, Any] = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
_A : int = nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , activation_fn="""gelu""" , attention_bias=_a , )
for d in range(_a )
] )
if norm_in_type == "layer":
_A : Union[str, Any] = nn.LayerNorm(_a )
elif norm_in_type is None:
_A : Tuple = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
_A : int = nn.LayerNorm(_a )
_A : str = nn.Linear(_a , _a )
_A : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
_A : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _a , persistent=_a )
_A : Tuple = nn.Parameter(torch.zeros(1 , _a ) )
_A : Dict = nn.Parameter(torch.zeros(1 , _a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
_A : List[str] = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
_A : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def a__ ( self , _a ) -> List[str]:
_A : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def a__ ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
def a__ ( self , _a , _a , _a , _a = None , _a = None , _a = True , ) -> Optional[Any]:
_A : Tuple = hidden_states.shape[0]
_A : List[Any] = timestep
if not torch.is_tensor(_a ):
_A : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
_A : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_A : Optional[int] = timesteps * torch.ones(_a , dtype=timesteps.dtype , device=timesteps.device )
_A : Dict = self.time_proj(_a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_A : Tuple = timesteps_projected.to(dtype=self.dtype )
_A : List[Any] = self.time_embedding(_a )
if self.embedding_proj_norm is not None:
_A : Dict = self.embedding_proj_norm(_a )
_A : List[Any] = self.embedding_proj(_a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_A : List[Any] = self.encoder_hidden_states_proj(_a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
_A : Optional[int] = self.proj_in(_a )
_A : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
_A : Union[str, Any] = []
_A : List[str] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_A : List[str] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_A : List[str] = hidden_states[:, None, :]
_A : Dict = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_A : Optional[int] = self.prd_embedding.to(hidden_states.dtype ).expand(_a , -1 , -1 )
additional_embeds.append(_a )
_A : str = torch.cat(
_a , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_A : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_A : Union[str, Any] = F.pad(
_a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_A : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_A : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
_A : List[Any] = F.pad(_a , (0, self.additional_embeddings) , value=0.0 )
_A : Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_A : int = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_A : str = self.norm_in(_a )
for block in self.transformer_blocks:
_A : List[Any] = block(_a , attention_mask=_a )
_A : Any = self.norm_out(_a )
if self.prd_embedding is not None:
_A : int = hidden_states[:, -1]
else:
_A : Any = hidden_states[:, additional_embeddings_len:]
_A : Union[str, Any] = self.proj_to_clip_embeddings(_a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_a )
def a__ ( self , _a ) -> Tuple:
_A : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
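# Hedged smoke test for the prior above. The masked class mirrors diffusers'
# public `PriorTransformer`, so the upstream import is used here; the tiny
# hyperparameters are illustrative assumptions, not values from any released
# checkpoint.
if __name__ == "__main__":
    from diffusers import PriorTransformer

    prior = PriorTransformer(
        num_attention_heads=2,
        attention_head_dim=8,
        num_layers=2,
        embedding_dim=16,
        num_embeddings=3,
        additional_embeddings=4,
    )
    sample = torch.randn(2, 16)  # latent image embedding: (batch, embedding_dim)
    timestep = torch.tensor([1, 1])  # one timestep per batch element
    proj_embedding = torch.randn(2, 16)  # projected text embedding
    encoder_states = torch.randn(2, 3, 16)  # num_embeddings text tokens
    out = prior(sample, timestep, proj_embedding, encoder_hidden_states=encoder_states)
    assert out.predicted_image_embedding.shape == (2, 16)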
| 343 | 1 |
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = 1
for i in range(1,num + 1 ):
fact *= i
return fact
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = 0
while number > 0:
_A : Optional[Any] = number % 10
sum_of_digits += last_digit
_A : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCAmelCase_ ( snake_case_ = 100 ):
_A : List[str] = factorial(snake_case_ )
_A : Optional[Any] = split_and_add(snake_case_ )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
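# Worked example: factorial(10) == 3628800 and
# split_and_add(3628800) == 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10)
# returns 27; for the default input, solution(100) gives the Project Euler 20
# answer, 648.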
| 343 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Any = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A : Union[str, Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[str] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : int = None
if token is not None:
_A : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    _A : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
_A : Optional[Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[Any] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Dict = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Tuple = requests.get(snake_case_,headers=snake_case_,allow_redirects=snake_case_ )
_A : Tuple = result.headers["""Location"""]
_A : Union[str, Any] = requests.get(snake_case_,allow_redirects=snake_case_ )
_A : Dict = os.path.join(snake_case_,f'''{artifact_name}.zip''' )
with open(snake_case_,"""wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : List[str] = []
_A : int = []
_A : Tuple = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
_A : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A : Dict = line[: line.index(""": """ )]
_A : Dict = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_A : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
_A : Optional[int] = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
f'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
_A : Any = None
if job_name and job_links:
_A : Dict = job_links.get(snake_case_,snake_case_ )
# A list with elements of the form (line of error, error, failed test)
_A : Optional[int] = [x + [y] + [job_link] for x, y in zip(snake_case_,snake_case_ )]
return result
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = []
_A : Optional[int] = [os.path.join(snake_case_,snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_,job_links=snake_case_ ) )
return errors
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = Counter()
counter.update([x[1] for x in logs] )
_A : Tuple = counter.most_common()
_A : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A : str = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_A : Dict = test.split("""/""" )[2]
else:
_A : str = None
return test
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A : Union[str, Any] = [x for x in logs if x[2] is not None]
_A : Optional[Any] = {x[2] for x in logs}
_A : List[Any] = {}
for test in tests:
_A : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A : Union[str, Any] = counter.most_common()
_A : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A : str = sum(error_counts.values() )
if n_errors > 0:
_A : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = """| no. | error | status |"""
_A : List[Any] = """|-:|:-|:-|"""
_A : List[Any] = [header, sep]
for error in reduced_by_error:
_A : List[str] = reduced_by_error[error]["""count"""]
_A : List[Any] = f'''| {count} | {error[:100]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = """| model | no. of errors | major error | count |"""
_A : Optional[Any] = """|-:|-:|-:|-:|"""
_A : Union[str, Any] = [header, sep]
for model in reduced_by_model:
_A : Dict = reduced_by_model[model]["""count"""]
_A , _A : str = list(reduced_by_model[model]["""errors"""].items() )[0]
_A : Union[str, Any] = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
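# Worked example for make_github_table (toy input; the error string is made up):
# reduce_by_error([["line 1", "ImportError: no module named x", "tests/t.py::test_a", None]])
# yields {"ImportError: no module named x": {"count": 1, "failed_tests": [("tests/t.py::test_a", "line 1")]}},
# and make_github_table renders that dict as
#
#   | no. | error | status |
#   |-:|:-|:-|
#   | 1 | ImportError: no module named x | |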
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_snake_case = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case = get_job_links(args.workflow_run_id, token=args.token)
_snake_case = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case = k.find(" / ")
_snake_case = k[index + len(" / ") :]
_snake_case = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case = reduce_by_error(errors)
_snake_case = reduce_by_model(errors)
_snake_case = make_github_table(reduced_by_error)
_snake_case = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 343 | 1 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], xa: float, xb: float) -> float:
    # secant method: keep the last two iterates distinct and stop once they
    # agree to within 1e-5
    x_n: float = xa
    x_na: float = xb
    while True:
        if x_n == x_na or function(x_na) == function(x_n):
            raise ZeroDivisionError("""float division by zero, could not find root""")
        x_nb: float = x_na - (
            function(x_na) / ((function(x_na) - function(x_n)) / (x_na - x_n))
        )
        if abs(x_nb - x_na) < 10**-5:
            return x_nb
        x_n = x_na
        x_na = x_nb


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
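# Worked check: f(x) = x**3 - 2*x - 5 has a single real root near 2.0945515,
# so intersection(f, 3, 3.5) prints approximately 2.0945.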
| 343 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
debug_launcher(test_script.main )
def a__ ( self ) -> Any:
debug_launcher(test_ops.main )
| 343 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(UpperCamelCase__ )
class lowercase :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
F'''There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.''' )
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
_A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
_A : Tuple = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_A : Tuple = sorted(_a , key=lambda _a : x[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ["input_ids", "attention_mask"]
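# Hedged usage sketch: the masked classes above mirror transformers' public
# `DPRReaderTokenizer`, so the published checkpoint id and upstream API are
# used here (commented out to avoid a network fetch on import):
#
# from transformers import DPRReaderTokenizer
#
# reader_tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded_inputs = reader_tokenizer(
#     questions="What is love?",
#     titles="Haddaway",
#     texts="'What Is Love' is a song recorded by Haddaway.",
#     return_tensors="pt",
# )
# # encoded_inputs["input_ids"] has shape (n_passages, sequence_length) and
# # follows the layout [CLS] <question> [SEP] <title> [SEP] <text>.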
| 343 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = "resnet"
_a = ["basic", "bottleneck"]
def __init__( self , _a=3 , _a=64 , _a=[256, 512, 1024, 2048] , _a=[3, 4, 6, 3] , _a="bottleneck" , _a="relu" , _a=False , _a=None , _a=None , **_a , ) -> int:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
_A : Optional[Any] = num_channels
_A : List[Any] = embedding_size
_A : int = hidden_sizes
_A : Union[str, Any] = depths
_A : Optional[int] = layer_type
_A : Any = hidden_act
_A : List[Any] = downsample_in_first_stage
_A : int = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
_A , _A : str = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class lowercase ( UpperCamelCase__ ):
_a = version.parse("1.11" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self ) -> float:
return 1e-3
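# Hedged sketch: the masked classes above mirror transformers' public
# `ResNetConfig`/`ResNetOnnxConfig`; a ResNet-18-style layout, for example,
# would be built as
#
# from transformers import ResNetConfig
#
# config = ResNetConfig(hidden_sizes=[64, 128, 256, 512], depths=[2, 2, 2, 2], layer_type="basic")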
| 343 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = SwinConfig(
embed_dim=192,depths=(2, 2, 18, 2),num_heads=(6, 12, 24, 48),window_size=12,out_features=["""stage2""", """stage3""", """stage4"""],)
_A : Optional[int] = DetaConfig(
backbone_config=snake_case_,num_queries=900,encoder_ffn_dim=2048,decoder_ffn_dim=2048,num_feature_levels=5,assign_first_stage=snake_case_,with_box_refine=snake_case_,two_stage=snake_case_,)
# set labels
_A : Tuple = """huggingface/label-files"""
if "o365" in model_name:
_A : Any = 366
_A : Union[str, Any] = """object365-id2label.json"""
else:
_A : Dict = 91
_A : Optional[int] = """coco-detection-id2label.json"""
_A : List[Any] = num_labels
_A : Any = json.load(open(cached_download(hf_hub_url(snake_case_,snake_case_,repo_type="""dataset""" ) ),"""r""" ) )
_A : Union[str, Any] = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : Tuple = idalabel
_A : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = dct.pop(snake_case_ )
_A : List[Any] = val
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_A : List[str] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_A : str = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_A : List[Any] = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_A : Optional[int] = in_proj_weight[:dim, :]
_A : Tuple = in_proj_bias[: dim]
_A : List[str] = in_proj_weight[
dim : dim * 2, :
]
_A : Dict = in_proj_bias[
dim : dim * 2
]
_A : Tuple = in_proj_weight[
-dim :, :
]
_A : Dict = in_proj_bias[-dim :]
# fmt: on
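# Worked shape example for the split above (illustrative numbers): with
# dim == 192, the fused Swin qkv weight has shape (576, 192); rows [0, 192)
# become the query projection, rows [192, 384) the key projection and rows
# [384, 576) the value projection of the corresponding HF attention module.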
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# transformer decoder self-attention layers
_A : Dict = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_A : int = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_A : int = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_A : Union[str, Any] = in_proj_weight[:hidden_size, :]
_A : str = in_proj_bias[:hidden_size]
_A : int = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_A : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_A : Any = in_proj_weight[-hidden_size:, :]
_A : str = in_proj_bias[-hidden_size:]
def lowerCAmelCase_ ( ):
_A : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_A : Dict = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = get_deta_config(snake_case_ )
# load original state dict
if model_name == "deta-swin-large":
_A : Any = hf_hub_download(repo_id="""nielsr/deta-checkpoints""",filename="""adet_swin_ft.pth""" )
elif model_name == "deta-swin-large-o365":
_A : Optional[int] = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""",filename="""deta_swin_pt_o365.pth""" )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
_A : Any = torch.load(snake_case_,map_location="""cpu""" )["""model"""]
# original state dict
for name, param in state_dict.items():
print(snake_case_,param.shape )
# rename keys
_A : Tuple = create_rename_keys(snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_,snake_case_,snake_case_ )
read_in_swin_q_k_v(snake_case_,config.backbone_config )
read_in_decoder_q_k_v(snake_case_,snake_case_ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_A : Union[str, Any] = state_dict.pop(snake_case_ )
_A : Dict = val
if "input_proj" in key:
_A : Optional[Any] = state_dict.pop(snake_case_ )
_A : Any = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_A : Tuple = state_dict.pop(snake_case_ )
_A : int = val
# finally, create HuggingFace model and load state dict
_A : Tuple = DetaForObjectDetection(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
_A : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu"""
model.to(snake_case_ )
# load image processor
_A : List[str] = DetaImageProcessor(format="""coco_detection""" )
# verify our conversion on image
_A : Optional[int] = prepare_img()
_A : Dict = processor(images=snake_case_,return_tensors="""pt""" )
_A : List[Any] = encoding["""pixel_values"""]
_A : Tuple = model(pixel_values.to(snake_case_ ) )
# verify logits
print("""Logits:""",outputs.logits[0, :3, :3] )
print("""Boxes:""",outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_A : List[Any] = torch.tensor(
[[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
_A : Optional[int] = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
elif model_name == "deta-swin-large-o365":
_A : Union[str, Any] = torch.tensor(
[[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
_A : Any = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
assert torch.allclose(outputs.logits[0, :3, :3],expected_logits.to(snake_case_ ),atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3],expected_boxes.to(snake_case_ ),atol=1e-4 )
print("""Everything ok!""" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
# Push to hub
if push_to_hub:
print("""Pushing model and processor to hub...""" )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
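# Example invocation (hedged: the script name and output path are placeholders):
# python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#     --pytorch_dump_folder_path /tmp/deta-swin-large --push_to_hub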
| 343 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# Load checkpoint
_A : Optional[int] = torch.load(snake_case_,map_location="""cpu""" )
_A : Any = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
_A : Any = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_A : Tuple = v
else:
_A : Dict = v
_A : Optional[Any] = chkpt["""params"""]
_A : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(snake_case_,(torch.FloatTensor, numpy.ndarray) )}
_A : str = chkpt["""dico_word2id"""]
_A : Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""","""""" ): i for s, i in vocab.items()}
# Save pytorch-model
_A : Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_A : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_A : Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(snake_case_,snake_case_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
print(f'''Save vocab file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
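# Example invocation (hedged: the script name and paths are placeholders):
# python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#     --xlm_checkpoint_path ./mlm_en_2048.pth --pytorch_dump_folder_path ./xlm-hf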
| 343 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_snake_case = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = {}
state_dict.pop("""pixel_mean""",snake_case_ )
state_dict.pop("""pixel_std""",snake_case_ )
_A : Tuple = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_A : Any = key.replace(snake_case_,snake_case_ )
if re.match(snake_case_,snake_case_ ):
_A : Optional[Any] = int(re.match(snake_case_,snake_case_ ).group(2 ) )
if layer_nb == 0:
_A : str = key.replace("""layers.0""","""proj_in""" )
elif layer_nb == 1:
_A : str = key.replace("""layers.1""","""layers.0""" )
elif layer_nb == 2:
_A : Optional[int] = key.replace("""layers.2""","""proj_out""" )
_A : Optional[Any] = value
_A : Any = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
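# Worked renaming example for replace_keys above: the original SAM key
# "image_encoder.blocks.0.norm1.weight" is rewritten step by step to
# "vision_encoder.blocks.0.norm1.weight" (image_encoder -> vision_encoder),
# then "vision_encoder.blocks.0.layer_norm1.weight" (.norm -> .layer_norm)
# and finally "vision_encoder.layers.0.layer_norm1.weight" (blocks -> layers).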
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_="ybelkada/segment-anything" ):
_A : Union[str, Any] = hf_hub_download(snake_case_,f'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_A : Dict = SamConfig()
elif "sam_vit_l" in model_name:
_A : List[str] = SamVisionConfig(
hidden_size=1024,num_hidden_layers=24,num_attention_heads=16,global_attn_indexes=[5, 11, 17, 23],)
_A : List[Any] = SamConfig(
vision_config=snake_case_,)
elif "sam_vit_h" in model_name:
_A : List[str] = SamVisionConfig(
hidden_size=1280,num_hidden_layers=32,num_attention_heads=16,global_attn_indexes=[7, 15, 23, 31],)
_A : Optional[Any] = SamConfig(
vision_config=snake_case_,)
_A : Union[str, Any] = torch.load(snake_case_,map_location="""cpu""" )
_A : Optional[Any] = replace_keys(snake_case_ )
_A : Any = SamImageProcessor()
_A : List[str] = SamProcessor(image_processor=snake_case_ )
_A : List[str] = SamModel(snake_case_ )
hf_model.load_state_dict(snake_case_ )
_A : Dict = hf_model.to("""cuda""" )
_A : str = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
_A : Optional[int] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ).convert("""RGB""" )
_A : Optional[Any] = [[[400, 650]]]
_A : Union[str, Any] = [[1]]
_A : List[Any] = processor(images=np.array(snake_case_ ),return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
_A : List[Any] = hf_model(**snake_case_ )
_A : Optional[Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_A : Optional[Any] = processor(
images=np.array(snake_case_ ),input_points=snake_case_,input_labels=snake_case_,return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
_A : int = hf_model(**snake_case_ )
_A : int = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_A : Optional[int] = ((75, 275, 1725, 850),)
_A : Any = processor(images=np.array(snake_case_ ),input_boxes=snake_case_,return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
_A : str = hf_model(**snake_case_ )
_A : Optional[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_A : int = [[[400, 650], [800, 650]]]
_A : Dict = [[1, 1]]
_A : List[Any] = processor(
images=np.array(snake_case_ ),input_points=snake_case_,input_labels=snake_case_,return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
_A : Optional[int] = hf_model(**snake_case_ )
_A : List[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
_snake_case = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
_snake_case = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
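# Example invocation (hedged: the script name and output path are placeholders):
# python convert_sam_original_to_hf_format.py --model_name sam_vit_b_01ec64 \
#     --pytorch_dump_folder_path /tmp/sam-vit-base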
| 343 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( UpperCamelCase__ ):
_a = ["image_processor", "tokenizer"]
_a = "BlipImageProcessor"
_a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a , _a ) -> Any:
_A : List[Any] = False
super().__init__(_a , _a )
_A : Optional[int] = self.image_processor
def __call__( self , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_A : Dict = self.tokenizer
_A : Dict = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
# add pixel_values
_A : int = self.image_processor(_a , return_tensors=_a )
if text is not None:
_A : List[Any] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
else:
_A : int = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def a__ ( self , *_a , **_a ) -> Any:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> Optional[Any]:
_A : Any = self.tokenizer.model_input_names
_A : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
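# Hedged usage sketch: the masked class above mirrors transformers' public
# `BlipProcessor`; the checkpoint id is the published BLIP captioning base and
# the image path is a placeholder (commented out to avoid side effects):
#
# from PIL import Image
# from transformers import BlipProcessor
#
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
# # `inputs` holds pixel_values from the image processor plus
# # input_ids/attention_mask from the tokenizer.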
| 343 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=0 ):
# Format the message.
if name is None:
_A : Union[str, Any] = None
else:
_A : Dict = """.""" * max(0,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
_A : Tuple = fmt.format(snake_case_ )
# Print and recurse (if needed).
if isinstance(snake_case_,snake_case_ ):
if msg is not None:
print(snake_case_ )
for k in val.keys():
recursive_print(snake_case_,val[k],spaces + 2 )
elif isinstance(snake_case_,torch.Tensor ):
print(snake_case_,""":""",val.size() )
else:
print(snake_case_,""":""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
_A : str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_A : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_A : Tuple = param.view(*snake_case_ )
_A : Any = param.transpose(0,2 )
_A : int = param.transpose(1,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_A : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_A : int = param.view(*snake_case_ )
_A : Any = param.transpose(0,1 ).contiguous()
_A : Optional[int] = param.view(*snake_case_ )
return param
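# A tiny worked example of the checkpoint_version >= 2.0 branch above: with num_heads=2,
# num_splits=3, hidden_size=4, a (24, d) tensor laid out row-wise as [heads, splits,
# hidden] is viewed to (2, 3, 4, d), transposed to (3, 2, 4, d) and flattened back to
# (24, d), i.e. the row order becomes [splits, heads, hidden] as expected downstream.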
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# The converted output model.
_A : Any = {}
# old versions did not store training args
_A : str = input_state_dict.get("""args""",snake_case_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.floataa ) ).view(
1,1,snake_case_,snake_case_ )
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
_A : List[str] = torch.tensor(-1e4,dtype=torch.floataa )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
def lowerCAmelCase_ ( ):
# Create the argument parser.
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""",action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""",type=snake_case_,help="""Path to the checkpoint file (.zip archive or direct .pt file)""",)
parser.add_argument(
"""--config_file""",default="""""",type=snake_case_,help="""An optional config json file describing the pre-trained model.""",)
_A : Optional[int] = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_A : Optional[Any] = convert_megatron_checkpoint(snake_case_,snake_case_,snake_case_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(snake_case_,snake_case_ )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
_A : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_A : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_A : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_A : Optional[Any] = """gpt2"""
_A : List[str] = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = type(snake_case_ ).__name__
_A : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(snake_case_ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(snake_case_ )
# Store the state_dict to file.
_A : Union[str, Any] = os.path.join(snake_case_,"""pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(snake_case_,snake_case_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 343 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = 0
if start < end:
_A : Tuple = randint(snake_case_,snake_case_ )
_A : Any = a[end]
_A : int = a[pivot]
_A : int = temp
_A , _A : List[Any] = _in_place_partition(snake_case_,snake_case_,snake_case_ )
count += _in_place_quick_sort(snake_case_,snake_case_,p - 1 )
count += _in_place_quick_sort(snake_case_,p + 1,snake_case_ )
return count
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = 0
_A : List[str] = randint(snake_case_,snake_case_ )
_A : Union[str, Any] = a[end]
_A : List[str] = a[pivot]
_A : List[Any] = temp
_A : List[str] = start - 1
for index in range(snake_case_,snake_case_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_A : Union[str, Any] = new_pivot_index + 1
_A : List[Any] = a[new_pivot_index]
_A : Optional[int] = a[index]
_A : List[Any] = temp
_A : Optional[Any] = a[new_pivot_index + 1]
_A : Any = a[end]
_A : Dict = temp
return new_pivot_index + 1, count
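# This is a Lomuto-style partition around a randomly chosen pivot; the second return
# value counts element comparisons, so the driver below reports the total number of
# comparisons, which is O(n log n) in expectation for random pivots.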
_snake_case = TemporaryFile()
_snake_case = 100 # 100 elements are to be sorted
_snake_case , _snake_case = 0, 1 # mean and standard deviation
_snake_case = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
_snake_case = np.load(outfile)
_snake_case = len(M) - 1
_snake_case = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 343 | 1 |
def lowerCAmelCase_ ( snake_case_,snake_case_ = " " ):
_A : List[Any] = []
_A : Optional[Any] = 0
for index, char in enumerate(snake_case_ ):
if char == separator:
split_words.append(string[last_index:index] )
_A : str = index + 1
elif index + 1 == len(snake_case_ ):
split_words.append(string[last_index : index + 1] )
return split_words
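# Illustrative: with separator "," the function above splits "a,b,c" into
# ["a", "b", "c"]; unlike str.split, a trailing separator drops the final empty field
# instead of yielding "".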
if __name__ == "__main__":
from doctest import testmod
testmod()
| 343 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "audio-spectrogram-transformer"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=16 , _a=True , _a=10 , _a=10 , _a=1024 , _a=128 , **_a , ) -> List[Any]:
super().__init__(**_a )
_A : Any = hidden_size
_A : Tuple = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Any = intermediate_size
_A : Optional[Any] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Optional[Any] = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : str = patch_size
_A : Tuple = qkv_bias
_A : Dict = frequency_stride
_A : Union[str, Any] = time_stride
_A : Any = max_length
_A : Tuple = num_mel_bins
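# frequency_stride and time_stride control how densely 16x16 patches are sampled from
# the (max_length x num_mel_bins) log-mel spectrogram; strides below the patch size give
# overlapping patches, as in the original AST setup.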
| 343 | 1 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=False , _a=True , _a="None" , _a=3 , _a=4 , _a=None , ) -> Union[str, Any]:
_A : Tuple = parent
_A : Dict = batch_size
_A : Dict = seq_length
_A : int = is_training
_A : Optional[Any] = use_input_mask
_A : str = use_token_type_ids
_A : Optional[int] = use_labels
_A : str = vocab_size
_A : List[Any] = hidden_size
_A : Optional[Any] = num_hidden_layers
_A : Optional[int] = num_attention_heads
_A : List[str] = intermediate_size
_A : Optional[Any] = hidden_act
_A : str = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : int = max_position_embeddings
_A : int = type_vocab_size
_A : List[Any] = type_sequence_label_size
_A : Optional[int] = initializer_range
_A : Optional[int] = num_labels
_A : str = num_choices
_A : Dict = relative_attention
_A : Dict = position_biased_input
_A : Tuple = pos_att_type
_A : List[str] = scope
def a__ ( self ) -> Any:
_A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : List[str] = None
if self.use_input_mask:
_A : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A : Union[str, Any] = None
if self.use_token_type_ids:
_A : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : List[str] = None
_A : int = None
_A : Union[str, Any] = None
if self.use_labels:
_A : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : Any = ids_tensor([self.batch_size] , self.num_choices )
_A : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> Dict:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
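    # relative_attention, position_biased_input and pos_att_type configure DeBERTa's
    # disentangled attention (the extra content-to-position and position-to-content
    # score terms).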
def a__ ( self , _a ) -> int:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> int:
_A : int = DebertaVaModel(config=_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , attention_mask=_a , token_type_ids=_a )[0]
_A : Any = model(_a , token_type_ids=_a )[0]
_A : Optional[int] = model(_a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
_A : Union[str, Any] = DebertaVaForMaskedLM(config=_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_A : List[Any] = self.num_labels
_A : List[Any] = DebertaVaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_a )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]:
_A : List[Any] = self.num_labels
_A : List[Any] = DebertaVaForTokenClassification(config=_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[Any]:
_A : List[str] = DebertaVaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_A : int = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any:
_A : List[Any] = DebertaVaForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_A : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A : Optional[Any] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self ) -> int:
_A : str = self.prepare_config_and_inputs()
        _A , _A , _A , _A , _A , _A , _A : str = config_and_inputs
_A : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_a = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = DebertaVaModelTester(self )
_A : Any = ConfigTester(self , config_class=_a , hidden_size=37 )
def a__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
def a__ ( self ) -> List[Any]:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_a )
def a__ ( self ) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_a )
def a__ ( self ) -> Dict:
_A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_a )
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_a )
def a__ ( self ) -> str:
_A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_a )
def a__ ( self ) -> str:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_a )
@slow
def a__ ( self ) -> List[str]:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : str = DebertaVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def a__ ( self ) -> List[str]:
pass
@slow
def a__ ( self ) -> Any:
_A : Dict = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A : Tuple = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_A : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A : List[str] = model(_a , attention_mask=_a )[0]
# compare the actual values for a slice.
_A : Tuple = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1e-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 343 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def lowerCAmelCase_ ( ):
_A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
_A : Optional[Any] = parser.parse_args()
return args.f
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> None:
_A : List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
def a__ ( self , _a ) -> Dict:
_A : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_a , """argv""" , _a ):
_A : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_a , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ) -> Optional[int]:
_A : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_a )
_A : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
_A : List[str] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
| 343 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> int:
_A : List[str] = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=_a ).to(_a )
_A : Tuple = AutoTokenizer.from_pretrained("""google/mt5-small""" )
_A : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
_A : List[str] = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
_A : List[str] = model(input_ids.to(_a ) , labels=labels.to(_a ) ).loss
_A : Optional[int] = -(labels.shape[-1] * loss.item())
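        # loss is the mean cross-entropy per target token, so scaling by the target
        # length and negating recovers the total sequence log-likelihood that the
        # original Mesh TensorFlow (MTF) implementation reports.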
_A : Any = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 343 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ) -> Union[str, Any]:
_A : Optional[int] = parent
_A : Dict = batch_size
_A : Any = image_size
_A : Optional[int] = patch_size
_A : Optional[int] = num_channels
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : str = initializer_range
_A : Tuple = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : str = num_patches + 1
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Union[str, Any] = self.type_sequence_label_size
_A : Tuple = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , labels=_a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Dict = 1
_A : str = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A : Dict = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Tuple = ViTMSNModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Any:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Tuple = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def a__ ( self ) -> str:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> List[Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Any:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
torch.manual_seed(2 )
_A : Tuple = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a )
_A : Tuple = self.default_image_processor
_A : Dict = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 343 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> Optional[int]:
_A : Optional[Any] = parent
_A : Any = batch_size
_A : Tuple = seq_length
_A : Tuple = is_training
_A : Tuple = use_attention_mask
_A : str = use_token_type_ids
_A : Union[str, Any] = use_labels
_A : List[str] = vocab_size
_A : List[Any] = hidden_size
_A : List[Any] = num_hidden_layers
_A : str = num_attention_heads
_A : Union[str, Any] = intermediate_size
_A : str = hidden_act
_A : Union[str, Any] = hidden_dropout_prob
_A : Union[str, Any] = attention_probs_dropout_prob
_A : str = max_position_embeddings
_A : List[str] = type_vocab_size
_A : Optional[Any] = type_sequence_label_size
_A : str = initializer_range
_A : str = num_choices
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Optional[Any] = None
if self.use_attention_mask:
_A : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_A : int = None
if self.use_token_type_ids:
_A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Optional[Any] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> Dict:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A , _A : Tuple = config_and_inputs
_A : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> Optional[Any]:
_A : Optional[Any] = FlaxAlbertModelTester(self )
@slow
def a__ ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
_A : Any = model_class_name.from_pretrained("""albert-base-v2""" )
_A : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> List[Any]:
_A : List[str] = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
_A : Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_A : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_A : Optional[int] = model(_a , attention_mask=_a )[0]
_A : str = (1, 11, 768)
self.assertEqual(output.shape , _a )
_A : str = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _a , atol=1e-4 ) )
| 343 |
def lowerCAmelCase_ ( snake_case_ = 1000 ):
_A : List[Any] = 3
_A : Tuple = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:  # a multiple of 15 already satisfies this test
            result += a
        a += 1
return result
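# Illustrative: the same answer follows in O(1) time by inclusion-exclusion,
#   s(k) = k * m * (m + 1) // 2 with m = (n - 1) // k,
# giving s(3) + s(5) - s(15).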
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaControlnetPipeline
_a = ["image_embeds", "negative_image_embeds", "hint"]
_a = ["image_embeds", "negative_image_embeds", "hint"]
_a = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> Dict:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Optional[int]:
torch.manual_seed(0 )
_A : List[str] = {
"""in_channels""": 8,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Dict = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> Optional[Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> int:
torch.manual_seed(0 )
_A : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> List[str]:
_A : int = self.dummy_unet
_A : Optional[Any] = self.dummy_movq
_A : Optional[int] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_a , )
_A : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
_A : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
_A : int = torch.manual_seed(_a )
else:
_A : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
_A : Dict = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> List[Any]:
_A : Any = """cpu"""
_A : Any = self.get_dummy_components()
_A : List[str] = self.pipeline_class(**_a )
_A : Optional[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : str = output.images
_A : Optional[int] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Optional[Any] = image[0, -3:, -3:, -1]
_A : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
_A : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
_A : List[str] = torch.from_numpy(np.array(_a ) ).float() / 255.0
_A : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
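        # np.array(image) is H x W x C in [0, 255]; dividing by 255 and permuting to
        # 1 x C x H x W yields the float hint layout the controlnet-conditioned UNet expects.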
_A : Any = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
_A : Optional[int] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : List[str] = """A robot, 4k photo"""
_A : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
_A , _A : str = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : Tuple = torch.Generator(device="""cuda""" ).manual_seed(0 )
_A : Tuple = pipeline(
image_embeds=_a , negative_image_embeds=_a , hint=_a , generator=_a , num_inference_steps=100 , output_type="""np""" , )
_A : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a , _a )
| 343 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> List[Any]:
_A : Tuple = parent
_A : Any = batch_size
_A : int = image_size
_A : Tuple = num_channels
_A : List[Any] = num_stages
_A : Any = hidden_sizes
_A : Union[str, Any] = depths
_A : Union[str, Any] = is_training
_A : Tuple = use_labels
_A : Optional[Any] = intermediate_size
_A : Union[str, Any] = hidden_act
_A : Any = num_labels
_A : List[str] = initializer_range
_A : str = out_features
_A : int = out_indices
_A : List[Any] = scope
def a__ ( self ) -> str:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.num_labels )
_A : str = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> int:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> List[Any]:
_A : Union[str, Any] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> str:
_A : List[str] = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Optional[Any] = None
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> int:
_A : int = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : int = ConvNextModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> List[str]:
_A : Optional[int] = ConvNextModelTester(self )
| 343 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( UpperCamelCase__ ):
_a = (UniPCMultistepScheduler,)
_a = (("num_inference_steps", 2_5),)
def a__ ( self , **_a ) -> List[str]:
_A : List[str] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""solver_type""": """bh2""",
}
config.update(**_a )
return config
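    # Per the diffusers docs, solver_type "bh1" is suggested for unconditional sampling
    # with fewer than ~10 steps, and "bh2" (the default exercised here) otherwise.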
def a__ ( self , _a=0 , **_a ) -> Dict:
_A : Any = dict(self.forward_default_kwargs )
_A : Union[str, Any] = kwargs.pop("""num_inference_steps""" , _a )
_A : Dict = self.dummy_sample
_A : Optional[Any] = 0.1 * sample
_A : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_A : int = self.get_scheduler_config(**_a )
_A : List[str] = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
_A : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
_A : Optional[Any] = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
_A : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
_A , _A : Dict = sample, sample
for t in range(_a , time_step + scheduler.config.solver_order + 1 ):
_A : str = scheduler.step(_a , _a , _a , **_a ).prev_sample
_A : Union[str, Any] = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self , _a=0 , **_a ) -> Dict:
_A : Union[str, Any] = dict(self.forward_default_kwargs )
_A : Tuple = kwargs.pop("""num_inference_steps""" , _a )
_A : int = self.dummy_sample
_A : int = 0.1 * sample
_A : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_A : str = self.get_scheduler_config()
_A : str = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
_A : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
_A : Union[str, Any] = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
_A : int = dummy_past_residuals[: new_scheduler.config.solver_order]
_A : Optional[int] = scheduler.step(_a , _a , _a , **_a ).prev_sample
_A : Optional[int] = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self , _a=None , **_a ) -> Dict:
if scheduler is None:
_A : Union[str, Any] = self.scheduler_classes[0]
_A : Optional[Any] = self.get_scheduler_config(**_a )
_A : str = scheduler_class(**_a )
_A : List[Any] = self.scheduler_classes[0]
_A : Dict = self.get_scheduler_config(**_a )
_A : Any = scheduler_class(**_a )
_A : List[str] = 10
_A : List[str] = self.dummy_model()
_A : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : Optional[int] = model(_a , _a )
_A : List[Any] = scheduler.step(_a , _a , _a ).prev_sample
return sample
def a__ ( self ) -> Union[str, Any]:
_A : Optional[Any] = dict(self.forward_default_kwargs )
_A : List[Any] = kwargs.pop("""num_inference_steps""" , _a )
for scheduler_class in self.scheduler_classes:
_A : Tuple = self.get_scheduler_config()
_A : str = scheduler_class(**_a )
_A : Optional[Any] = self.dummy_sample
_A : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_a , """set_timesteps""" ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a , """set_timesteps""" ):
_A : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
_A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_A : Optional[Any] = scheduler.timesteps[5]
_A : Any = scheduler.timesteps[6]
_A : Any = scheduler.step(_a , _a , _a , **_a ).prev_sample
_A : List[Any] = scheduler.step(_a , _a , _a , **_a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a__ ( self ) -> Tuple:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_A : Dict = UniPCMultistepScheduler(**self.get_scheduler_config() )
_A : Tuple = self.full_loop(scheduler=_a )
_A : Optional[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_A : List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_A : int = DEISMultistepScheduler.from_config(scheduler.config )
_A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
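        # switch back to UniPC after the round-trip through compatible schedulers; the result must match the first run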
_A : str = UniPCMultistepScheduler.from_config(scheduler.config )
_A : List[Any] = self.full_loop(scheduler=_a )
_A : Dict = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def a__ ( self ) -> int:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def a__ ( self ) -> Tuple:
self.check_over_configs(thresholding=_a )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , solver_order=_a , solver_type=_a , )
def a__ ( self ) -> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def a__ ( self ) -> List[Any]:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_a , solver_type=_a , prediction_type=_a , )
_A : Any = self.full_loop(
solver_order=_a , solver_type=_a , prediction_type=_a , )
assert not torch.isnan(_a ).any(), "Samples have nan numbers"
def a__ ( self ) -> Dict:
self.check_over_configs(lower_order_final=_a )
self.check_over_configs(lower_order_final=_a )
def a__ ( self ) -> str:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_a , time_step=0 )
def a__ ( self ) -> int:
_A : Optional[int] = self.full_loop()
_A : Tuple = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def a__ ( self ) -> Any:
_A : Optional[int] = self.full_loop(prediction_type="""v_prediction""" )
_A : Tuple = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def a__ ( self ) -> Dict:
_A : Optional[Any] = self.scheduler_classes[0]
_A : Union[str, Any] = self.get_scheduler_config(thresholding=_a , dynamic_thresholding_ratio=0 )
_A : str = scheduler_class(**_a )
_A : int = 10
_A : Optional[int] = self.dummy_model()
_A : int = self.dummy_sample_deter.half()
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : Dict = model(_a , _a )
_A : int = scheduler.step(_a , _a , _a ).prev_sample
assert sample.dtype == torch.floataa
def a__ ( self , **_a ) -> str:
for scheduler_class in self.scheduler_classes:
_A : int = self.get_scheduler_config(**_a )
_A : Tuple = scheduler_class(**_a )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
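            # with one inference step per training step there must be no duplicate timesteps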
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 343 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=24 , _a=2 , _a=6 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=None , _a=1000 , ) -> Optional[int]:
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : Union[str, Any] = seq_length
_A : str = is_training
_A : Optional[Any] = use_input_mask
_A : str = use_token_type_ids
_A : int = use_labels
_A : Any = vocab_size
_A : Optional[Any] = hidden_size
_A : Tuple = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : List[Any] = hidden_act
_A : List[Any] = hidden_dropout_prob
_A : int = attention_probs_dropout_prob
_A : Any = max_position_embeddings
_A : Union[str, Any] = type_vocab_size
_A : Optional[Any] = type_sequence_label_size
_A : Optional[Any] = initializer_range
_A : Optional[Any] = num_labels
_A : Any = scope
_A : List[str] = range_bbox
def a__ ( self ) -> int:
_A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
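        # swap coordinates where needed so that x0 <= x1 and y0 <= y1 for every box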
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_A : Optional[Any] = bbox[i, j, 3]
_A : Dict = bbox[i, j, 1]
_A : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_A : List[Any] = bbox[i, j, 2]
_A : int = bbox[i, j, 0]
_A : List[Any] = t
_A : Dict = None
if self.use_input_mask:
_A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A : Any = None
if self.use_token_type_ids:
_A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Union[str, Any] = None
_A : int = None
if self.use_labels:
_A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def a__ ( self ) -> Union[str, Any]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]:
_A : Any = LiltModel(config=_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a , bbox=_a , attention_mask=_a , token_type_ids=_a )
_A : Optional[int] = model(_a , bbox=_a , token_type_ids=_a )
_A : str = model(_a , bbox=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]:
_A : List[str] = self.num_labels
_A : Optional[Any] = LiltForTokenClassification(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(
_a , bbox=_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , ) -> Tuple:
_A : Optional[Any] = LiltForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_A : List[Any] = model(
_a , bbox=_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self ) -> Tuple:
_A : Optional[Any] = self.prepare_config_and_inputs()
        _A , _A , _A , _A , _A , _A , _A : Optional[Any] = config_and_inputs
_A : Dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_a = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = False
def a__ ( self , _a , _a , _a , _a , _a ) -> Dict:
return True
def a__ ( self ) -> Optional[Any]:
_A : int = LiltModelTester(self )
_A : Union[str, Any] = ConfigTester(self , config_class=_a , hidden_size=37 )
def a__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def a__ ( self ) -> Any:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> int:
_A : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A : Union[str, Any] = type
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
def a__ ( self ) -> Optional[int]:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
@slow
def a__ ( self ) -> Union[str, Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = LiltModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@slow
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Any:
_A : Optional[int] = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(_a )
_A : Optional[int] = torch.tensor([[1, 2]] , device=_a )
_A : Optional[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_a )
# forward pass
with torch.no_grad():
_A : List[Any] = model(input_ids=_a , bbox=_a )
_A : Tuple = torch.Size([1, 2, 768] )
_A : Any = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_a , )
        self.assertEqual(outputs.last_hidden_state.shape , _a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _a , atol=1e-3 ) )
| 343 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase :
_a = 42
# setable values
_a = 42
_a = 42
_a = None
@classmethod
def a__ ( cls , _a , _a , _a ) -> Tuple:
return cls(common=_a , init_noise_sigma=_a , timesteps=_a )
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in FlaxKarrasDiffusionSchedulers]
_a = 42
@property
def a__ ( self ) -> Dict:
return True
@register_to_config
def __init__( self , _a = 1000 , _a = 0.0001 , _a = 0.02 , _a = "linear" , _a = None , _a = "fixed_small" , _a = True , _a = "epsilon" , _a = jnp.floataa , ) -> Tuple:
_A : Tuple = dtype
def a__ ( self , _a = None ) -> DDPMSchedulerState:
if common is None:
_A : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
_A : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_a , init_noise_sigma=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a = None ) -> jnp.ndarray:
return sample
def a__ ( self , _a , _a , _a = () ) -> DDPMSchedulerState:
_A : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
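        # e.g. num_train_timesteps=1000 and num_inference_steps=50 give step_ratio=20 and timesteps [980, 960, ..., 0]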
_A : Dict = (jnp.arange(0 , _a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a=None , _a=None ) -> Optional[int]:
_A : Optional[Any] = state.common.alphas_cumprod[t]
_A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
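        # posterior variance (Eq. (7)): beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t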
_A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_A : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_A : Optional[Any] = jnp.clip(_a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_A : Any = jnp.log(jnp.clip(_a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_A : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A : str = variance
_A : Union[str, Any] = state.common.betas[t]
_A : Tuple = (predicted_variance + 1) / 2
_A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self , _a , _a , _a , _a , _a = None , _a = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A : Dict = timestep
if key is None:
_A : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_A , _A : List[str] = jnp.split(_a , sample.shape[1] , axis=1 )
else:
_A : int = None
# 1. compute alphas, betas
_A : int = state.common.alphas_cumprod[t]
_A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A : Union[str, Any] = 1 - alpha_prod_t
_A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
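            # v-prediction: x_0 = sqrt(alpha_bar_t) * x_t - sqrt(1 - alpha_bar_t) * v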
_A : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A : Union[str, Any] = jnp.clip(_a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_A : Tuple = jax.random.split(_a , num=1 )
_A : Dict = jax.random.normal(_a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_a , _a , predicted_variance=_a ) ** 0.5) * noise
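        # noise is added only for t > 0; at the final step (t == 0) the predicted mean is returned unchanged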
_A : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_a , state=_a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return add_noise_common(state.common , _a , _a , _a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return get_velocity_common(state.common , _a , _a , _a )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
| 343 | 1 |
def topological_sort(graph):
    # Kahn's algorithm: repeatedly remove vertices of indegree 0; if some
    # vertices are never removed, the graph contains a cycle.
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("""Cycle exists""")
    else:
        print(topo)
# Adjacency List of Graph
_snake_case = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(_snake_case)
| 343 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = """.""" * max(0, spaces - 2) + """# {:""" + str(50 - spaces) + """s}"""
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, """:""", val.size())
    else:
        print(msg, """:""", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
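# For example (illustrative numbers): with checkpoint_version >= 2.0, num_heads=16,
# num_splits=3 and hidden_size=64, a fused QKV weight of shape [16 * 3 * 64, D] is
# viewed as (16, 3, 64, D), transposed to (3, 16, 64, D) and flattened back, so the
# Q, K and V blocks become contiguous along the first axis.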
def convert_megatron_checkpoint(args, input_state_dict, config):
# The converted output model.
_A : Any = {}
# old versions did not store training args
_A : str = input_state_dict.get("""args""",snake_case_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
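    # e.g. "layers.0.attention.dense.weight" -> groups ("0", "attention.dense", "weight")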
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.floataa ) ).view(
1,1,snake_case_,snake_case_ )
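            # the lower-triangular matrix of ones is the standard GPT-2 causal attention mask, shaped (1, 1, n_positions, n_positions)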
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
_A : List[str] = torch.tensor(-1e4,dtype=torch.floataa )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the matrix tied to the word embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""",action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""",type=snake_case_,help="""Path to the checkpoint file (.zip archive or direct .pt file)""",)
parser.add_argument(
"""--config_file""",default="""""",type=snake_case_,help="""An optional config json file describing the pre-trained model.""",)
_A : Optional[int] = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_A : Optional[Any] = convert_megatron_checkpoint(snake_case_,snake_case_,snake_case_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(snake_case_,snake_case_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_A : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_A : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_A : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_A : Optional[Any] = """gpt2"""
_A : List[str] = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = type(snake_case_ ).__name__
_A : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(snake_case_ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(snake_case_ )
# Store the state_dict to file.
_A : Union[str, Any] = os.path.join(snake_case_,"""pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(snake_case_,snake_case_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 343 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "swin2sr"
_a = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _a=64 , _a=1 , _a=3 , _a=180 , _a=[6, 6, 6, 6, 6, 6] , _a=[6, 6, 6, 6, 6, 6] , _a=8 , _a=2.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=False , _a=0.02 , _a=1e-5 , _a=2 , _a=1.0 , _a="1conv" , _a="pixelshuffle" , **_a , ) -> List[Any]:
super().__init__(**_a )
_A : Optional[int] = image_size
_A : int = patch_size
_A : int = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : List[str] = len(_a )
_A : int = num_heads
_A : int = window_size
_A : str = mlp_ratio
_A : Dict = qkv_bias
_A : Tuple = hidden_dropout_prob
_A : Tuple = attention_probs_dropout_prob
_A : Optional[int] = drop_path_rate
_A : List[Any] = hidden_act
_A : int = use_absolute_embeddings
_A : List[Any] = layer_norm_eps
_A : List[str] = initializer_range
_A : Any = upscale
_A : Optional[int] = img_range
_A : int = resi_connection
_A : Any = upsampler
| 343 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(UpperCamelCase__ )
class lowercase :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
F'''There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.''' )
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
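        # concatenate the encoded question+title (with special tokens) and the bare passage text, truncating the combined sequence to max_length when requested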
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
_A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
_A : Tuple = []
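        # score every candidate span (start_index, start_index + answer_length) by start_logit + end_logit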
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _A : Tuple = sorted(_a , key=lambda x : x[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
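            # skip candidates that overlap a previously chosen (higher-scoring) span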
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ["input_ids", "attention_mask"]
| 343 | 1 |
from math import isclose, sqrt
def next_point(point_x, point_y, incoming_gradient):
    # the gradient of the normal at (x, y) on the ellipse 4x^2 + y^2 = 100 is y / (4x)
    normal_gradient = point_y / 4 / point_x
    # reflect the incoming gradient about the normal (double-angle identities)
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # the beam enters from (0.0, 10.1) heading towards the first impact point
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 343 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ ( self ) -> List[Any]:
_A : int = ort.SessionOptions()
_A : Any = False
return options
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_A : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_A : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_A : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = """A red cat sitting on a park bench"""
_A : Optional[Any] = np.random.RandomState(0 )
_A : Dict = pipe(
prompt=_a , image=_a , mask_image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_a , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 343 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_snake_case = get_tests_dir("fixtures")
_snake_case = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_snake_case = get_tests_dir("fixtures/dummy-config.json")
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
_A : Optional[int] = 0
def a__ ( self ) -> List[str]:
_A : int = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Tuple:
_A : Dict = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_A : str = AutoFeatureExtractor.from_pretrained(_a ).to_dict()
config_dict.pop("""feature_extractor_type""" )
_A : Optional[int] = WavaVecaFeatureExtractor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_A : Optional[Any] = AutoFeatureExtractor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_A : List[str] = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Optional[Any]:
_A : int = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> int:
with self.assertRaisesRegex(
_a , """bert-base is not a local folder and is not a valid model identifier""" ):
_A : Optional[Any] = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def a__ ( self ) -> str:
with self.assertRaisesRegex(
_a , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_A : Tuple = AutoFeatureExtractor.from_pretrained(_a , revision="""aaaaaa""" )
def a__ ( self ) -> Any:
with self.assertRaisesRegex(
_a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
_A : int = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def a__ ( self ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_A : Tuple = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_A : Dict = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
_A : Dict = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_a )
_A : List[str] = AutoFeatureExtractor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def a__ ( self ) -> List[Any]:
try:
AutoConfig.register("""custom""" , _a )
AutoFeatureExtractor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoFeatureExtractor.register(_a , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
_A : List[str] = CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_a )
_A : List[Any] = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> List[Any]:
class lowercase ( UpperCamelCase__ ):
_a = True
try:
AutoConfig.register("""custom""" , _a )
AutoFeatureExtractor.register(_a , _a )
# If remote code is not set, the default is to use local
_A : List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_A : List[str] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_A : List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(_a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 343 |
from __future__ import annotations
def generate_all_permutations(sequence):
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(sequence, current_sequence, index, index_used):
    # a permutation is complete once every position has been filled
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # backtrack: undo the choice before trying the next element
            current_sequence.pop()
            index_used[i] = False
_snake_case = [3, 1, 2, 4]
generate_all_permutations(_snake_case)
_snake_case = ["A", "B", "C"]
generate_all_permutations(_snake_case)
| 343 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCAmelCase_ ( snake_case_ ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_A : int = model_type_to_module_name(snake_case_ )
_A : Tuple = importlib.import_module(f'''.{module_name}''',"""transformers.models""" )
try:
return getattr(snake_case_,snake_case_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_A : Dict = importlib.import_module("""transformers""" )
if hasattr(snake_case_,snake_case_ ):
return getattr(snake_case_,snake_case_ )
return None
def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,):
_A : Optional[int] = get_file_from_repo(
snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,)
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(snake_case_,encoding="""utf-8""" ) as reader:
return json.load(snake_case_ )
class lowercase :
def __init__( self ) -> Any:
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_a )
def a__ ( cls , _a , **_a ) -> str:
_A : Any = kwargs.pop("""config""" , _a )
_A : Tuple = kwargs.pop("""trust_remote_code""" , _a )
_A : Dict = True
_A , _A : List[str] = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a )
_A : str = config_dict.get("""feature_extractor_type""" , _a )
_A : Optional[Any] = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
_A : Union[str, Any] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_a , _a ):
_A : str = AutoConfig.from_pretrained(_a , **_a )
# It could be in `config.feature_extractor_type``
_A : Dict = getattr(_a , """feature_extractor_type""" , _a )
if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
_A : Optional[Any] = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
_A : int = feature_extractor_class_from_name(_a )
_A : List[str] = feature_extractor_auto_map is not None
_A : Dict = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING
_A : Optional[int] = resolve_trust_remote_code(
_a , _a , _a , _a )
if has_remote_code and trust_remote_code:
_A : str = get_class_from_dynamic_module(
_a , _a , **_a )
_A : Dict = kwargs.pop("""code_revision""" , _a )
if os.path.isdir(_a ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_a , **_a )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_a , **_a )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING:
_A : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(_a )]
return feature_extractor_class.from_dict(_a , **_a )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def a__ ( _a , _a ) -> Any:
FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
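# A minimal usage sketch, assuming this class is exported as `AutoFeatureExtractor`
# as in the original transformers module (the identifiers above are obfuscated):
#
#   from transformers import AutoFeatureExtractor
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   # or register a custom (config, feature extractor) pair:
#   # AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)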
| 343 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = filter(lambda snake_case_ : p.requires_grad,model.parameters() )
_A : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if metric == "rouge2":
_A : Optional[int] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_A : Dict = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_A : List[str] = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
""" function.""" )
_A : Optional[int] = ModelCheckpoint(
dirpath=snake_case_,filename=snake_case_,monitor=f'''val_{metric}''',mode="""max""",save_top_k=3,every_n_epochs=1,)
return checkpoint_callback
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return EarlyStopping(
monitor=f'''val_{metric}''',mode="""min""" if """loss""" in metric else """max""",patience=snake_case_,verbose=snake_case_,)
class lowercase ( pl.Callback ):
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[Any] = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_a )
@rank_zero_only
def a__ ( self , _a , _a , _a , _a=True ) -> None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
_A : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_A : Dict = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A : List[Any] = od / """test_results.txt"""
_A : List[Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A : Optional[int] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
_A : int = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_a )
generations_file.parent.mkdir(exist_ok=_a )
with open(_a , """a+""" ) as writer:
for key in sorted(_a ):
if key in ["log", "progress_bar", "preds"]:
continue
_A : List[Any] = metrics[key]
if isinstance(_a , torch.Tensor ):
_A : str = val.item()
_A : str = F'''{key}: {val:.6f}\n'''
writer.write(_a )
if not save_generations:
return
if "preds" in metrics:
_A : List[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_a )
@rank_zero_only
def a__ ( self , _a , _a ) -> str:
try:
_A : int = pl_module.model.model.num_parameters()
except AttributeError:
_A : str = pl_module.model.num_parameters()
_A : Optional[int] = count_trainable_parameters(_a )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self , _a , _a ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_a , _a , """test""" )
@rank_zero_only
def a__ ( self , _a , _a ) -> Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 343 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCAmelCase_ ( snake_case_ ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
    # as are Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
if (
(cp >= 0X4_E00 and cp <= 0X9_FFF)
or (cp >= 0X3_400 and cp <= 0X4_DBF) #
or (cp >= 0X20_000 and cp <= 0X2A_6DF) #
or (cp >= 0X2A_700 and cp <= 0X2B_73F) #
or (cp >= 0X2B_740 and cp <= 0X2B_81F) #
or (cp >= 0X2B_820 and cp <= 0X2C_EAF) #
or (cp >= 0XF_900 and cp <= 0XF_AFF)
or (cp >= 0X2F_800 and cp <= 0X2F_A1F) #
): #
return True
return False
def lowerCAmelCase_ ( snake_case_ ):
    # a word like '180' or '身高' (height) or '神' (god)
for char in word:
_A : List[str] = ord(snake_case_ )
if not _is_chinese_char(snake_case_ ):
return 0
return 1
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = set()
for token in tokens:
_A : int = len(snake_case_ ) > 1 and is_chinese(snake_case_ )
if chinese_word:
word_set.add(snake_case_ )
_A : int = list(snake_case_ )
return word_list
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if not chinese_word_set:
return bert_tokens
_A : Tuple = max([len(snake_case_ ) for w in chinese_word_set] )
_A : int = bert_tokens
_A , _A : List[str] = 0, len(snake_case_ )
while start < end:
_A : int = True
if is_chinese(bert_word[start] ):
_A : List[Any] = min(end - start,snake_case_ )
for i in range(snake_case_,1,-1 ):
_A : Any = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1,start + i ):
_A : List[Any] = """##""" + bert_word[j]
_A : Any = start + i
_A : int = False
break
if single_word:
start += 1
return bert_word
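# Worked example of the merge above (this helper is `add_sub_symbol` in the
# original whole-word-masking script): with BERT tokens ['我', '喜', '欢']
# ("I like") and the LTP word set {'喜欢'} ("to like"), the continuation
# character of '喜欢' gets the ## prefix, yielding ['我', '喜', '##欢'].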
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Optional[Any] = []
for i in range(0,len(snake_case_ ),100 ):
_A : Union[str, Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_A : Optional[int] = [get_chinese_word(snake_case_ ) for r in res]
ltp_res.extend(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
_A : Tuple = []
for i in range(0,len(snake_case_ ),100 ):
_A : Optional[Any] = bert_tokenizer(lines[i : i + 100],add_special_tokens=snake_case_,truncation=snake_case_,max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(snake_case_ ) == len(snake_case_ )
_A : Optional[int] = []
for input_ids, chinese_word in zip(snake_case_,snake_case_ ):
_A : int = []
for id in input_ids:
_A : Any = bert_tokenizer._convert_id_to_token(snake_case_ )
input_tokens.append(snake_case_ )
_A : Optional[Any] = add_sub_symbol(snake_case_,snake_case_ )
_A : List[Any] = []
        # We only save the positions of Chinese subwords that start with ##, meaning they are part of a whole word.
for i, token in enumerate(snake_case_ ):
if token[:2] == "##":
_A : Dict = token[2:]
                # save the Chinese tokens' positions
if len(snake_case_ ) == 1 and _is_chinese_char(ord(snake_case_ ) ):
ref_id.append(snake_case_ )
ref_ids.append(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
return ref_ids
def lowerCAmelCase_ ( snake_case_ ):
    # For Chinese (Ro)Bert, the best result is from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name,"""r""",encoding="""utf-8""" ) as f:
_A : str = f.readlines()
_A : Optional[int] = [line.strip() for line in data if len(snake_case_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _A : Union[str, Any] = LTP(args.ltp ) # faster on a GPU device
_A : Optional[Any] = BertTokenizer.from_pretrained(args.bert )
_A : Tuple = prepare_ref(snake_case_,snake_case_,snake_case_ )
with open(args.save_path,"""w""",encoding="""utf-8""" ) as f:
_A : Tuple = [json.dumps(snake_case_ ) + """\n""" for ref in ref_ids]
f.writelines(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
_snake_case = parser.parse_args()
main(args)
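# Example invocation (hypothetical paths; the flags are the ones defined by the
# argparse block above):
#
#   python prepare_chinese_ref.py --file_name ./data/zh.txt \
#       --ltp ./resources/ltp --bert bert-base-chinese --save_path ./data/ref.txt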
| 343 |
from __future__ import annotations
from collections.abc import Callable
_snake_case = list[list[float | int]]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
_A : int
_A : int
_A : int
_A : int
_A : int
_A : float
for row in range(snake_case_ ):
for col in range(snake_case_ ):
_A : Dict = matrix[row][col]
_A : List[Any] = vector[row][0]
_A : List[Any] = 0
_A : Optional[Any] = 0
while row < size and col < size:
# pivoting
_A : Any = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_,snake_case_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_A , _A : Optional[Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1,snake_case_ ):
_A : str = augmented[rowa][col] / augmented[row][col]
_A : List[Any] = 0
for cola in range(col + 1,size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1,snake_case_ ):
for row in range(snake_case_ ):
_A : int = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_,size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row],10 )] for row in range(snake_case_ )
]
def lowerCAmelCase_ ( snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
_A : Matrix = [[0] for _ in range(snake_case_ )]
_A : Matrix
_A : int
_A : int
_A : int
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
_A : str = (x_val + 1) ** (size - col - 1)
_A : List[str] = y_val
_A : Any = solve(snake_case_,snake_case_ )
def interpolated_func(snake_case_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
def lowerCAmelCase_ ( snake_case_ ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCAmelCase_ ( snake_case_ = question_function,snake_case_ = 10 ):
_A : list[int] = [func(snake_case_ ) for x_val in range(1,order + 1 )]
_A : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1,order + 1 )
]
_A : int = 0
_A : Callable[[int], int]
_A : int
for poly in polynomials:
_A : Optional[int] = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
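# A quick sanity check of the Gaussian-elimination helper (named `solve` in the
# original Project Euler 101 solution; the names above are obfuscated): solving
# x + y = 3 and 2x - y = 0 gives x = 1, y = 2.
#
#   solve([[1, 1], [2, -1]], [[3], [0]])  # -> [[1.0], [2.0]]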
| 343 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = "▁"
_snake_case = {"vocab_file": "sentencepiece.bpe.model"}
_snake_case = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
_snake_case = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "attention_mask"]
def __init__( self , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a = None , **_a , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
_A : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_A : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_A : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_A : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic the fairseq token-to-id alignment for the first 4 tokens
_A : Tuple = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_A : Any = 1
_A : List[Any] = len(self.sp_model ) + self.fairseq_offset
_A : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
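        # Worked example of the offset: with fairseq_offset = 1, spm id 3 (",")
        # maps to fairseq id 4, while the four specials keep their hard-coded ids,
        # so converting "<s>" yields 0 and converting "," yields 3 + 1 = 4.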
def __getstate__( self ) -> Any:
_A : str = self.__dict__.copy()
_A : int = None
_A : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _a ) -> int:
_A : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_A : List[str] = {}
_A : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__ ( self , _a , _a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A : Optional[Any] = [self.cls_token_id]
_A : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Optional[Any] = [self.sep_token_id]
_A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def a__ ( self ) -> str:
_A : Dict = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ ( self , _a ) -> List[str]:
return self.sp_model.encode(_a , out_type=_a )
def a__ ( self , _a ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_A : int = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__ ( self , _a ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__ ( self , _a ) -> Dict:
_A : int = """""".join(_a ).replace(_a , """ """ ).strip()
return out_string
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : str = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
_A : Tuple = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 343 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
_snake_case = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowerCAmelCase_ ( snake_case_ = "mumbai" ):
_A : Optional[Any] = BeautifulSoup(requests.get(url + location ).content,"""html.parser""" )
    # This attribute finds all the job specifics listed in a posting
for job in soup.find_all("""div""",attrs={"""data-tn-component""": """organicJob"""} ):
_A : Tuple = job.find("""a""",attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
_A : Optional[int] = job.find("""span""",{"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 343 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase ( UpperCamelCase__ ):
_a = "blenderbot-small"
_a = ["past_key_values"]
_a = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _a=5_0265 , _a=512 , _a=8 , _a=2048 , _a=16 , _a=8 , _a=2048 , _a=16 , _a=0.0 , _a=0.0 , _a=True , _a=True , _a="gelu" , _a=512 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=1 , _a=False , _a=0 , _a=1 , _a=2 , _a=2 , **_a , ) -> List[str]:
_A : Dict = vocab_size
_A : Tuple = max_position_embeddings
_A : Union[str, Any] = d_model
_A : Optional[int] = encoder_ffn_dim
_A : Tuple = encoder_layers
_A : List[str] = encoder_attention_heads
_A : Union[str, Any] = decoder_ffn_dim
_A : Union[str, Any] = decoder_layers
_A : int = decoder_attention_heads
_A : Any = dropout
_A : Any = attention_dropout
_A : Optional[int] = activation_dropout
_A : str = activation_function
_A : Dict = init_std
_A : Optional[int] = encoder_layerdrop
_A : List[str] = decoder_layerdrop
_A : Tuple = use_cache
_A : Optional[Any] = encoder_layers
_A : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , **_a , )
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_A : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_A : List[str] = {0: """batch"""}
_A : int = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_A : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
_A : Any = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_a , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_A : Any = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_A , _A : Tuple = self.num_layers
for i in range(_a ):
_A : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
_A : int = {0: """batch""", 2: """past_sequence + sequence"""}
else:
_A : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_A : Optional[int] = super().outputs
else:
_A : int = super(_a , self ).outputs
if self.use_past:
_A , _A : str = self.num_layers
for i in range(_a ):
_A : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
_A : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def a__ ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Mapping[str, Any]:
_A : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_a , _a , _a , _a , _a )
# Generate decoder inputs
_A : List[str] = seq_length if not self.use_past else 1
_A : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_a , _a , _a , _a , _a )
_A : Tuple = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_A : Optional[Any] = dict(**_a , **_a )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_A , _A : Union[str, Any] = common_inputs["""input_ids"""].shape
_A : str = common_inputs["""decoder_input_ids"""].shape[1]
_A , _A : Dict = self.num_attention_heads
_A : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_A : Union[str, Any] = decoder_seq_length + 3
_A : List[str] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_A : Optional[int] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_a , _a )] , dim=1 )
_A : int = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_A , _A : Any = self.num_layers
_A : List[Any] = min(_a , _a )
_A : Any = max(_a , _a ) - min_num_layers
_A : Any = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_a ):
common_inputs["past_key_values"].append(
(
torch.zeros(_a ),
torch.zeros(_a ),
torch.zeros(_a ),
torch.zeros(_a ),
) )
# TODO: test this.
_A : str = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_a , _a ):
common_inputs["past_key_values"].append((torch.zeros(_a ), torch.zeros(_a )) )
return common_inputs
def a__ ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Mapping[str, Any]:
_A : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_a , _a , _a , _a , _a )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_A , _A : Union[str, Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_A : Optional[Any] = seqlen + 2
_A , _A : List[str] = self.num_layers
_A , _A : Dict = self.num_attention_heads
_A : Dict = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_A : Tuple = common_inputs["""attention_mask"""].dtype
_A : int = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_a , _a , dtype=_a )] , dim=1 )
_A : Optional[Any] = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(_a )
]
return common_inputs
def a__ ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_A : List[str] = compute_effective_axis_dimension(
_a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_A : Dict = tokenizer.num_special_tokens_to_add(_a )
_A : Optional[int] = compute_effective_axis_dimension(
_a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_a )
# Generate dummy inputs according to compute batch and sequence
_A : List[Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
_A : Dict = dict(tokenizer(_a , return_tensors=_a ) )
return common_inputs
def a__ ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_A : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
elif self.task == "causal-lm":
_A : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
else:
_A : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
return common_inputs
def a__ ( self , _a , _a , _a , _a ) -> Any:
if self.task in ["default", "seq2seq-lm"]:
_A : Tuple = super()._flatten_past_key_values_(_a , _a , _a , _a )
else:
_A : Optional[int] = super(_a , self )._flatten_past_key_values_(
_a , _a , _a , _a )
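# Shape sanity check for the dummy past_key_values built above: with the default
# BlenderbotSmall config (d_model=512, 16 decoder attention heads), batch size 2
# and decoder length 8, each decoder past tensor has shape
# (2, 16, 8 + 3, 512 // 16) = (2, 16, 11, 32).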
| 343 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(snake_case_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_A : List[Any] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
_A : Tuple = [[0.0, 0.0], [0.0, 0.0]]
_A , _A : List[str] = matrix[1][1], matrix[0][0]
_A , _A : List[str] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(snake_case_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(snake_case_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_A : List[str] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
_A : List[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_A : Union[str, Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_A : Optional[Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_A : List[Any] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_A : int = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_A : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_A : List[str] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_A : Optional[int] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_A : List[Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
_A : List[str] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_A : Union[str, Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(snake_case_ )
# Calculate the inverse of the matrix
return [[float(d(snake_case_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
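# A quick sanity check (the function is `inverse_of_matrix` in the original
# module; the name above is obfuscated). For [[2, 5], [2, 0]] the determinant
# is 2 * 0 - 2 * 5 = -10, so:
#
#   inverse_of_matrix([[2.0, 5.0], [2.0, 0.0]])  # -> [[0.0, 0.5], [0.2, -0.2]]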
| 343 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "xlnet"
_a = ["mems"]
_a = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _a=3_2000 , _a=1024 , _a=24 , _a=16 , _a=4096 , _a="gelu" , _a=True , _a="bi" , _a=0.02 , _a=1e-12 , _a=0.1 , _a=512 , _a=None , _a=True , _a=False , _a=False , _a=-1 , _a=False , _a="last" , _a=True , _a="tanh" , _a=0.1 , _a=5 , _a=5 , _a=5 , _a=1 , _a=2 , **_a , ) -> Union[str, Any]:
_A : str = vocab_size
_A : List[Any] = d_model
_A : Union[str, Any] = n_layer
_A : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
_A : List[Any] = d_model // n_head
_A : str = ff_activation
_A : Any = d_inner
_A : List[Any] = untie_r
_A : str = attn_type
_A : Any = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : Dict = dropout
_A : int = mem_len
_A : Optional[Any] = reuse_len
_A : Tuple = bi_data
_A : List[Any] = clamp_len
_A : Tuple = same_length
_A : str = summary_type
_A : int = summary_use_proj
_A : Optional[int] = summary_activation
_A : List[str] = summary_last_dropout
_A : Optional[int] = start_n_top
_A : int = end_n_top
_A : Optional[int] = bos_token_id
_A : Optional[int] = pad_token_id
_A : int = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , _a , )
_A : Dict = kwargs["""use_cache"""]
_A : List[str] = use_mems_eval
_A : str = use_mems_train
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
@property
def a__ ( self ) -> int:
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def a__ ( self , _a ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
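# A minimal instantiation sketch, assuming the class is exported as `XLNetConfig`
# as in the original transformers module:
#
#   config = XLNetConfig(vocab_size=32000, d_model=1024, n_layer=24, n_head=16)
#   assert config.d_head == config.d_model // config.n_head  # 64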
| 343 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
@register_to_config
def __init__( self , _a = 32 , _a = 64 , _a = 20 , _a = 768 , _a=77 , _a=4 , _a = 0.0 , _a = "silu" , _a = None , _a = None , _a = "linear" , _a = "prd" , _a = None , _a = None , _a = None , ) -> Any:
super().__init__()
_A : int = num_attention_heads
_A : Union[str, Any] = attention_head_dim
_A : Tuple = num_attention_heads * attention_head_dim
_A : Any = additional_embeddings
_A : Any = time_embed_dim or inner_dim
_A : List[str] = embedding_proj_dim or embedding_dim
_A : Optional[int] = clip_embed_dim or embedding_dim
_A : Union[str, Any] = Timesteps(_a , _a , 0 )
_A : str = TimestepEmbedding(_a , _a , out_dim=_a , act_fn=_a )
_A : Dict = nn.Linear(_a , _a )
if embedding_proj_norm_type is None:
_A : int = None
elif embedding_proj_norm_type == "layer":
_A : Optional[Any] = nn.LayerNorm(_a )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
_A : Optional[Any] = nn.Linear(_a , _a )
if encoder_hid_proj_type is None:
_A : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
_A : Tuple = nn.Linear(_a , _a )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
_A : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _a ) )
if added_emb_type == "prd":
_A : str = nn.Parameter(torch.zeros(1 , 1 , _a ) )
elif added_emb_type is None:
_A : Union[str, Any] = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
_A : int = nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , activation_fn="""gelu""" , attention_bias=_a , )
for d in range(_a )
] )
if norm_in_type == "layer":
_A : Union[str, Any] = nn.LayerNorm(_a )
elif norm_in_type is None:
_A : Tuple = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
_A : int = nn.LayerNorm(_a )
_A : str = nn.Linear(_a , _a )
_A : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
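        # After triu_(1), only the strictly upper-triangular entries keep -10000,
        # e.g. for 3 positions: [[0, -1e4, -1e4], [0, 0, -1e4], [0, 0, 0]],
        # so each query attends only to itself and to earlier positions.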
_A : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _a , persistent=_a )
_A : Tuple = nn.Parameter(torch.zeros(1 , _a ) )
_A : Dict = nn.Parameter(torch.zeros(1 , _a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
_A : List[str] = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
_A : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def a__ ( self , _a ) -> List[str]:
_A : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def a__ ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
def a__ ( self , _a , _a , _a , _a = None , _a = None , _a = True , ) -> Optional[Any]:
_A : Tuple = hidden_states.shape[0]
_A : List[Any] = timestep
if not torch.is_tensor(_a ):
_A : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
_A : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_A : Optional[int] = timesteps * torch.ones(_a , dtype=timesteps.dtype , device=timesteps.device )
_A : Dict = self.time_proj(_a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_A : Tuple = timesteps_projected.to(dtype=self.dtype )
_A : List[Any] = self.time_embedding(_a )
if self.embedding_proj_norm is not None:
_A : Dict = self.embedding_proj_norm(_a )
_A : List[Any] = self.embedding_proj(_a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_A : List[Any] = self.encoder_hidden_states_proj(_a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
_A : Optional[int] = self.proj_in(_a )
_A : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
_A : Union[str, Any] = []
_A : List[str] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_A : List[str] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_A : List[str] = hidden_states[:, None, :]
_A : Dict = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_A : Optional[int] = self.prd_embedding.to(hidden_states.dtype ).expand(_a , -1 , -1 )
additional_embeds.append(_a )
_A : str = torch.cat(
_a , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_A : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_A : Union[str, Any] = F.pad(
_a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_A : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_A : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
_A : List[Any] = F.pad(_a , (0, self.additional_embeddings) , value=0.0 )
_A : Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_A : int = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_A : str = self.norm_in(_a )
for block in self.transformer_blocks:
_A : List[Any] = block(_a , attention_mask=_a )
_A : Any = self.norm_out(_a )
if self.prd_embedding is not None:
_A : int = hidden_states[:, -1]
else:
_A : Any = hidden_states[:, additional_embeddings_len:]
_A : Union[str, Any] = self.proj_to_clip_embeddings(_a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_a )
def a__ ( self , _a ) -> Tuple:
_A : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 343 | 1 |
from __future__ import annotations
_snake_case = list[tuple[int, int]]
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free paths whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class lowercase :
def __init__( self , _a , _a , _a , _a , _a , _a , ) -> Dict:
_A : Union[str, Any] = pos_x
_A : Optional[int] = pos_y
_A : str = (pos_y, pos_x)
_A : int = goal_x
_A : str = goal_y
_A : Optional[Any] = g_cost
_A : Any = parent
_A : List[Any] = self.calculate_heuristic()
def a__ ( self ) -> float:
_A : int = abs(self.pos_x - self.goal_x )
_A : List[str] = abs(self.pos_y - self.goal_y )
return dx + dy
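        # e.g. a node at (pos_x=0, pos_y=0) with goal (6, 6) has Manhattan
        # distance |0 - 6| + |0 - 6| = 12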
def __lt__( self , _a ) -> bool:
return self.f_cost < other.f_cost
class lowercase :
def __init__( self , _a , _a ) -> int:
_A : Dict = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a )
_A : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , _a )
_A : List[Any] = [self.start]
_A : list[Node] = []
_A : List[Any] = False
def a__ ( self ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_A : Tuple = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_A : List[Any] = True
return self.retrace_path(_a )
self.closed_nodes.append(_a )
_A : Any = self.get_successors(_a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_a )
else:
# retrieve the best current path
_A : int = self.open_nodes.pop(self.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_a )
else:
self.open_nodes.append(_a )
if not self.reached:
return [self.start.pos]
return None
def a__ ( self , _a ) -> list[Node]:
_A : Tuple = []
for action in delta:
_A : Tuple = parent.pos_x + action[1]
_A : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) )
return successors
def a__ ( self , _a ) -> Path:
_A : Any = node
_A : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_A : List[str] = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
_snake_case = GreedyBestFirst(init, goal)
_snake_case = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_snake_case = 2
for elem in grid:
print(elem)
| 343 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Any = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A : Union[str, Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[str] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : int = None
if token is not None:
_A : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
_A : Optional[Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[Any] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Dict = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Tuple = requests.get(snake_case_,headers=snake_case_,allow_redirects=snake_case_ )
_A : Tuple = result.headers["""Location"""]
_A : Union[str, Any] = requests.get(snake_case_,allow_redirects=snake_case_ )
_A : Dict = os.path.join(snake_case_,f'''{artifact_name}.zip''' )
with open(snake_case_,"""wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : List[str] = []
_A : int = []
_A : Tuple = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
_A : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A : Dict = line[: line.index(""": """ )]
_A : Dict = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_A : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
_A : Optional[int] = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
f'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
_A : Any = None
if job_name and job_links:
_A : Dict = job_links.get(snake_case_,snake_case_ )
# A list with elements of the form (line of error, error, failed test)
_A : Optional[int] = [x + [y] + [job_link] for x, y in zip(snake_case_,snake_case_ )]
return result
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = []
_A : Optional[int] = [os.path.join(snake_case_,snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_,job_links=snake_case_ ) )
return errors
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = Counter()
counter.update([x[1] for x in logs] )
_A : Tuple = counter.most_common()
_A : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A : str = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_A : Dict = test.split("""/""" )[2]
else:
_A : str = None
return test
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A : Union[str, Any] = [x for x in logs if x[2] is not None]
_A : Optional[Any] = {x[2] for x in logs}
_A : List[Any] = {}
for test in tests:
_A : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A : Union[str, Any] = counter.most_common()
_A : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A : str = sum(error_counts.values() )
if n_errors > 0:
_A : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = """| no. | error | status |"""
_A : List[Any] = """|-:|:-|:-|"""
_A : List[Any] = [header, sep]
for error in reduced_by_error:
_A : List[str] = reduced_by_error[error]["""count"""]
_A : List[Any] = f'''| {count} | {error[:100]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = """| model | no. of errors | major error | count |"""
_A : Optional[Any] = """|-:|-:|-:|-:|"""
_A : Union[str, Any] = [header, sep]
for model in reduced_by_model:
_A : Dict = reduced_by_model[model]["""count"""]
_A , _A : str = list(reduced_by_model[model]["""errors"""].items() )[0]
_A : Union[str, Any] = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_snake_case = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case = get_job_links(args.workflow_run_id, token=args.token)
_snake_case = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case = k.find(" / ")
_snake_case = k[index + len(" / ") :]
_snake_case = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case = reduce_by_error(errors)
_snake_case = reduce_by_model(errors)
_snake_case = make_github_table(reduced_by_error)
_snake_case = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
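# Example invocation (hypothetical run id and paths; the flags are the ones
# defined by the argparse block above):
#
#   python get_ci_error_statistics.py --workflow_run_id 123456789 \
#       --output_dir ci_reports --token "$GITHUB_TOKEN"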
| 343 | 1 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_snake_case = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
_snake_case = dataset.iloc[:, 1:2].values
_snake_case = dataset.iloc[:, 2].values
_snake_case , _snake_case , _snake_case , _snake_case = train_test_split(X, y, test_size=0.2, random_state=0)
_snake_case = PolynomialFeatures(degree=4)
_snake_case = poly_reg.fit_transform(X)
_snake_case = LinearRegression()
pol_reg.fit(X_poly, y)
def lowerCAmelCase_ ( ):
plt.scatter(snake_case_,snake_case_,color="""red""" )
plt.plot(snake_case_,pol_reg.predict(poly_reg.fit_transform(snake_case_ ) ),color="""blue""" )
    plt.title("""Truth or Bluff (Polynomial Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 343 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
debug_launcher(test_script.main )
def a__ ( self ) -> Any:
debug_launcher(test_ops.main )
| 343 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model"}
_snake_case = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
_snake_case = {
"camembert-base": 512,
}
_snake_case = "▁"
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "attention_mask"]
def __init__( self , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=["<s>NOTUSED", "</s>NOTUSED"] , _a = None , **_a , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_A : List[str] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_A : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_A : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_A : List[str] = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
_A : int = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
_A : Dict = len(self.fairseq_tokens_to_ids )
_A : Union[str, Any] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_A : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def a__ ( self , _a , _a = None ) -> List[int]:
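        # Builds CamemBERT's sequence format: `<s> A </s>` for a single sequence
        # and `<s> A </s></s> B </s>` for a pair, as concatenated below.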
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A : str = [self.cls_token_id]
_A : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Dict = [self.sep_token_id]
_A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ ( self ) -> Any:
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def a__ ( self ) -> Union[str, Any]:
_A : int = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ ( self , _a ) -> List[str]:
return self.sp_model.encode(_a , out_type=_a )
def a__ ( self , _a ) -> str:
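        # fairseq reserves the lowest ids (<s>NOTUSED, <pad>, ...), so sentencepiece
        # piece ids are shifted up by `fairseq_offset`; unknown pieces map to <unk>.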
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_a ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_a )
def a__ ( self , _a ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__ ( self , _a ) -> Tuple:
_A : int = []
_A : Tuple = """"""
_A : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
_A : str = True
_A : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
_A : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __getstate__( self ) -> str:
_A : Dict = self.__dict__.copy()
_A : List[str] = None
return state
def __setstate__( self , _a ) -> Any:
_A : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_A : Optional[Any] = {}
_A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : Dict = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
_A : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 343 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = "resnet"
_a = ["basic", "bottleneck"]
def __init__( self , _a=3 , _a=64 , _a=[256, 512, 1024, 2048] , _a=[3, 4, 6, 3] , _a="bottleneck" , _a="relu" , _a=False , _a=None , _a=None , **_a , ) -> int:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
_A : Optional[Any] = num_channels
_A : List[Any] = embedding_size
_A : int = hidden_sizes
_A : Union[str, Any] = depths
_A : Optional[int] = layer_type
_A : Any = hidden_act
_A : List[Any] = downsample_in_first_stage
_A : int = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
_A , _A : str = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class lowercase ( UpperCamelCase__ ):
_a = version.parse("1.11" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self ) -> float:
return 1e-3
| 343 | 1 |
_snake_case = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive values.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive values.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
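# Worked example (a sketch): 2 mol of an ideal gas at 300 K in a 0.01 m^3 vessel
# exerts P = n * R * T / V = 2 * 8.314462 * 300 / 0.01, i.e. roughly 498867.72 Pa.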
if __name__ == "__main__":
from doctest import testmod
testmod()
| 343 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# Load checkpoint
_A : Optional[int] = torch.load(snake_case_,map_location="""cpu""" )
_A : Any = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
_A : Any = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_A : Tuple = v
else:
_A : Dict = v
_A : Optional[Any] = chkpt["""params"""]
_A : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(snake_case_,(torch.FloatTensor, numpy.ndarray) )}
_A : str = chkpt["""dico_word2id"""]
_A : Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""","""""" ): i for s, i in vocab.items()}
# Save pytorch-model
_A : Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_A : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_A : Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(snake_case_,snake_case_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
print(f'''Save vocab file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 343 | 1 |
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
# Base Case
if index == len(snake_case_ ):
return True
# Recursive Step
for i in range(snake_case_ ):
if valid_coloring(graph[index],snake_case_,snake_case_ ):
# Color current vertex
_A : int = i
# Validate coloring
if util_color(snake_case_,snake_case_,snake_case_,index + 1 ):
return True
# Backtrack
_A : int = -1
return False
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = [-1] * len(snake_case_ )
if util_color(snake_case_,snake_case_,snake_case_,0 ):
return colored_vertices
return []
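if __name__ == "__main__":
    # Minimal usage sketch: `color` is assumed to be the un-obfuscated name of the
    # entry point above, and the triangle adjacency matrix below is hypothetical.
    _triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    print(color(_triangle, 2))  # [] -- a triangle admits no valid 2-coloring
    print(color(_triangle, 3))  # a valid assignment such as [0, 1, 2]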
| 343 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( UpperCamelCase__ ):
_a = ["image_processor", "tokenizer"]
_a = "BlipImageProcessor"
_a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a , _a ) -> Any:
_A : List[Any] = False
super().__init__(_a , _a )
_A : Optional[int] = self.image_processor
def __call__( self , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
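        # Dispatches on the supplied modalities: text-only calls return the tokenizer
        # output directly; otherwise images are preprocessed and any text encoding is
        # merged into the returned BatchEncoding alongside `pixel_values`.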
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_A : Dict = self.tokenizer
_A : Dict = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
# add pixel_values
_A : int = self.image_processor(_a , return_tensors=_a )
if text is not None:
_A : List[Any] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
else:
_A : int = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def a__ ( self , *_a , **_a ) -> Any:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> Optional[Any]:
_A : Any = self.tokenizer.model_input_names
_A : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 343 | 1 |
from math import ceil, sqrt
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
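    # Counts hollow square laminae buildable from at most `limit` tiles (Project
    # Euler 173): for each outer width, count hole widths of the same parity with
    # outer_width**2 - hole_width**2 <= limit.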
_A : Optional[Any] = 0
for outer_width in range(3,(limit // 4) + 2 ):
if outer_width**2 > limit:
_A : Union[str, Any] = max(ceil(sqrt(outer_width**2 - limit ) ),1 )
else:
_A : Optional[int] = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
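    # Randomized in-place quicksort that also tallies the element comparisons made.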
_A : Tuple = 0
if start < end:
_A : Tuple = randint(snake_case_,snake_case_ )
_A : Any = a[end]
_A : int = a[pivot]
_A : int = temp
_A , _A : List[Any] = _in_place_partition(snake_case_,snake_case_,snake_case_ )
count += _in_place_quick_sort(snake_case_,snake_case_,p - 1 )
count += _in_place_quick_sort(snake_case_,p + 1,snake_case_ )
return count
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
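    # Lomuto-style partition around a randomly chosen pivot (first swapped to the
    # end); returns the pivot's final index and the number of comparisons made.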
_A : str = 0
_A : List[str] = randint(snake_case_,snake_case_ )
_A : Union[str, Any] = a[end]
_A : List[str] = a[pivot]
_A : List[Any] = temp
_A : List[str] = start - 1
for index in range(snake_case_,snake_case_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_A : Union[str, Any] = new_pivot_index + 1
_A : List[Any] = a[new_pivot_index]
_A : Optional[int] = a[index]
_A : List[Any] = temp
_A : Optional[Any] = a[new_pivot_index + 1]
_A : Any = a[end]
_A : Dict = temp
return new_pivot_index + 1, count
_snake_case = TemporaryFile()
_snake_case = 100 # 100 elements are to be sorted
_snake_case , _snake_case = 0, 1 # mean and standard deviation
_snake_case = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
_snake_case = np.load(outfile)
_snake_case = len(M) - 1
_snake_case = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 343 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
_snake_case = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_snake_case = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_snake_case = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
_snake_case = OrderedDict(
[
    # Model for Image classification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
_snake_case = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
_snake_case = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
_snake_case = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
_snake_case = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
_snake_case = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
_snake_case = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
_snake_case = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
_snake_case = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
_snake_case = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_MAPPING
_snake_case = auto_class_update(FlaxAutoModel)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 343 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "audio-spectrogram-transformer"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=16 , _a=True , _a=10 , _a=10 , _a=1024 , _a=128 , **_a , ) -> List[Any]:
super().__init__(**_a )
_A : Any = hidden_size
_A : Tuple = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Any = intermediate_size
_A : Optional[Any] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Optional[Any] = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : str = patch_size
_A : Tuple = qkv_bias
_A : Dict = frequency_stride
_A : Union[str, Any] = time_stride
_A : Any = max_length
_A : Tuple = num_mel_bins
| 343 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
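    # Pages through the GitHub Actions "jobs" API for a workflow run (100 entries
    # per page) and returns a {job name: html_url} mapping, or {} on any failure.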
_A : Any = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A : Union[str, Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[str] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : int = None
if token is not None:
_A : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    _A : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
_A : Optional[Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[Any] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Dict = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Tuple = requests.get(snake_case_,headers=snake_case_,allow_redirects=snake_case_ )
_A : Tuple = result.headers["""Location"""]
_A : Union[str, Any] = requests.get(snake_case_,allow_redirects=snake_case_ )
_A : Dict = os.path.join(snake_case_,f'''{artifact_name}.zip''' )
with open(snake_case_,"""wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
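    # Walks a downloaded artifact zip and pairs each recorded error with the test
    # that failed, attaching the job's URL when `job_links` provides one.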
_A : List[str] = []
_A : int = []
_A : Tuple = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
_A : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A : Dict = line[: line.index(""": """ )]
_A : Dict = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_A : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
_A : Optional[int] = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
f'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
_A : Any = None
if job_name and job_links:
_A : Dict = job_links.get(snake_case_,snake_case_ )
# A list with elements of the form (line of error, error, failed test)
_A : Optional[int] = [x + [y] + [job_link] for x, y in zip(snake_case_,snake_case_ )]
return result
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = []
_A : Optional[int] = [os.path.join(snake_case_,snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_,job_links=snake_case_ ) )
return errors
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = Counter()
counter.update([x[1] for x in logs] )
_A : Tuple = counter.most_common()
_A : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A : str = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    _A : Union[str, Any] = dict(sorted(r.items(),key=lambda item : item[1]["count"],reverse=True ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
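    # Extracts the model folder name from a failed test's path, e.g.
    # `tests/models/albert/...` -> `albert`; non-model tests yield None.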
_A : Union[str, Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_A : Dict = test.split("""/""" )[2]
else:
_A : str = None
return test
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A : Union[str, Any] = [x for x in logs if x[2] is not None]
_A : Optional[Any] = {x[2] for x in logs}
_A : List[Any] = {}
for test in tests:
_A : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A : Union[str, Any] = counter.most_common()
_A : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A : str = sum(error_counts.values() )
if n_errors > 0:
_A : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
    _A : Union[str, Any] = dict(sorted(r.items(),key=lambda item : item[1]["count"],reverse=True ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
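    # Renders the reduced error counts as a GitHub-flavored markdown table with
    # one row per error, keeping the most frequent errors first.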
_A : Optional[int] = """| no. | error | status |"""
_A : List[Any] = """|-:|:-|:-|"""
_A : List[Any] = [header, sep]
for error in reduced_by_error:
_A : List[str] = reduced_by_error[error]["""count"""]
_A : List[Any] = f'''| {count} | {error[:100]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = """| model | no. of errors | major error | count |"""
_A : Optional[Any] = """|-:|-:|-:|-:|"""
_A : Union[str, Any] = [header, sep]
for model in reduced_by_model:
_A : Dict = reduced_by_model[model]["""count"""]
_A , _A : str = list(reduced_by_model[model]["""errors"""].items() )[0]
_A : Union[str, Any] = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_snake_case = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case = get_job_links(args.workflow_run_id, token=args.token)
_snake_case = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case = k.find(" / ")
_snake_case = k[index + len(" / ") :]
_snake_case = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case = reduce_by_error(errors)
_snake_case = reduce_by_model(errors)
_snake_case = make_github_table(reduced_by_error)
_snake_case = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 343 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def lowerCAmelCase_ ( ):
_A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
_A : Optional[Any] = parser.parse_args()
return args.f
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> None:
_A : List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
def a__ ( self , _a ) -> Dict:
_A : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_a , """argv""" , _a ):
_A : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_a , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ) -> Optional[int]:
_A : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_a )
_A : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
_A : List[str] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
| 343 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , ) -> List[str]:
_A : int = parent
_A : Dict = batch_size
_A : Optional[int] = image_size
_A : Tuple = patch_size
_A : Optional[Any] = num_channels
_A : List[Any] = is_training
_A : Any = use_labels
_A : Optional[Any] = hidden_size
_A : Union[str, Any] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Dict = intermediate_size
_A : Optional[int] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Optional[int] = type_sequence_label_size
_A : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : Union[str, Any] = (image_size // patch_size) ** 2
_A : Dict = num_patches + 1
def a__ ( self ) -> int:
_A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Tuple = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , )
return config, pixel_values
def a__ ( self , _a , _a ) -> int:
_A : str = FlaxViTModel(config=_a )
_A : Tuple = model(_a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_A : Tuple = (self.image_size, self.image_size)
_A : Union[str, Any] = (self.patch_size, self.patch_size)
_A : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[Any] = self.type_sequence_label_size
_A : Optional[int] = FlaxViTForImageClassification(config=_a )
_A : str = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : List[Any] = 1
_A : Any = FlaxViTForImageClassification(_a )
_A : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : Any = model(_a )
def a__ ( self ) -> str:
_A : str = self.prepare_config_and_inputs()
        _A , _A : List[str] = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def a__ ( self ) -> None:
_A : List[Any] = FlaxViTModelTester(self )
_A : Tuple = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> str:
self.config_tester.run_common_tests()
def a__ ( self ) -> List[Any]:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Optional[Any]:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def a__ ( self ) -> List[str]:
_A , _A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : List[str] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Optional[Any]:
_A , _A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A : Union[str, Any] = self._prepare_for_class(_a , _a )
_A : Tuple = model_class(_a )
@jax.jit
def model_jitted(_a , **_a ):
return model(pixel_values=_a , **_a )
with self.subTest("""JIT Enabled""" ):
_A : List[str] = model_jitted(**_a ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_A : List[str] = model_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def a__ ( self ) -> Any:
for model_class_name in self.all_model_classes:
_A : Dict = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
_A : List[Any] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_a )
| 343 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ) -> Union[str, Any]:
_A : Optional[int] = parent
_A : Dict = batch_size
_A : Any = image_size
_A : Optional[int] = patch_size
_A : Optional[int] = num_channels
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : str = initializer_range
_A : Tuple = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : str = num_patches + 1
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Union[str, Any] = self.type_sequence_label_size
_A : Tuple = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , labels=_a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Dict = 1
_A : str = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A : Dict = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Tuple = ViTMSNModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Any:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Tuple = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def a__ ( self ) -> str:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> List[Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Any:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
torch.manual_seed(2 )
_A : Tuple = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a )
_A : Tuple = self.default_image_processor
_A : Dict = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 343 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = "swin"
_a = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _a=224 , _a=4 , _a=3 , _a=96 , _a=[2, 2, 6, 2] , _a=[3, 6, 12, 24] , _a=7 , _a=4.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=False , _a=0.02 , _a=1e-5 , _a=32 , _a=None , _a=None , **_a , ) -> Optional[Any]:
super().__init__(**_a )
_A : Union[str, Any] = image_size
_A : str = patch_size
_A : Any = num_channels
_A : str = embed_dim
_A : Optional[int] = depths
_A : Optional[int] = len(_a )
_A : Any = num_heads
_A : Union[str, Any] = window_size
_A : Optional[int] = mlp_ratio
_A : Any = qkv_bias
_A : Any = hidden_dropout_prob
_A : int = attention_probs_dropout_prob
_A : Optional[int] = drop_path_rate
_A : List[str] = hidden_act
_A : Any = use_absolute_embeddings
_A : Dict = layer_norm_eps
_A : List[Any] = initializer_range
_A : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_A : Optional[Any] = int(embed_dim * 2 ** (len(_a ) - 1) )
_A : Optional[int] = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
_A , _A : List[str] = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class lowercase ( UpperCamelCase__ ):
_a = version.parse("1.11" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self ) -> float:
return 1e-4
| 343 |
def lowerCAmelCase_ ( snake_case_ = 1000 ):
_A : List[Any] = 3
_A : Tuple = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
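
# A constant-time alternative (a sketch) using inclusion-exclusion: add the
# multiples of 3 and of 5 below `n`, then subtract the multiples of 15, which
# the first two sums count twice. The helper name below is hypothetical.
def _sum_of_multiples(k: int, below: int) -> int:
    # Sum of the multiples of `k` strictly below `below`, via the arithmetic
    # series formula k * (1 + 2 + ... + m) with m = (below - 1) // k.
    m = (below - 1) // k
    return k * m * (m + 1) // 2

# Sanity check: _sum_of_multiples(3, 1000) + _sum_of_multiples(5, 1000)
# - _sum_of_multiples(15, 1000) == 233168, matching the loop above for n = 1000.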
| 343 | 1 |
import os
def lowerCAmelCase_ ( ):
    _A : List[Any] = os.path.join(os.path.dirname(__file__ ),"""num.txt""" )
with open(snake_case_ ) as file_hand:
return str(sum(int(snake_case_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 343 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> List[Any]:
_A : Tuple = parent
_A : Any = batch_size
_A : int = image_size
_A : Tuple = num_channels
_A : List[Any] = num_stages
_A : Any = hidden_sizes
_A : Union[str, Any] = depths
_A : Union[str, Any] = is_training
_A : Tuple = use_labels
_A : Optional[Any] = intermediate_size
_A : Union[str, Any] = hidden_act
_A : Any = num_labels
_A : List[str] = initializer_range
_A : str = out_features
_A : int = out_indices
_A : List[Any] = scope
def a__ ( self ) -> str:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.num_labels )
_A : str = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> int:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> List[Any]:
_A : Union[str, Any] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> str:
_A : List[str] = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Optional[Any] = None
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> int:
_A : int = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : int = ConvNextModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> List[str]:
_A : Optional[int] = ConvNextModelTester(self )
| 343 | 1 |
from itertools import product
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = sides_number
_A : Tuple = max_face_number * dice_number
_A : Optional[Any] = [0] * (max_total + 1)
_A : int = 1
_A : List[str] = range(snake_case_,max_face_number + 1 )
for dice_numbers in product(snake_case_,repeat=snake_case_ ):
_A : List[str] = sum(snake_case_ )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCAmelCase_ ( ):
_A : List[Any] = total_frequency_distribution(
sides_number=4,dice_number=9 )
_A : str = total_frequency_distribution(
sides_number=6,dice_number=6 )
_A : List[Any] = 0
_A : str = 9
_A : Tuple = 4 * 9
_A : int = 6
for peter_total in range(snake_case_,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_A : Optional[int] = (4**9) * (6**6)
_A : Any = peter_wins_count / total_games_number
_A : Dict = round(snake_case_,ndigits=7 )
return rounded_peter_win_probability
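# Illustrative sketch (not part of the original solution, hypothetical names):
# a self-contained restatement of the frequency-distribution idea above —
# every ordered roll is enumerated with itertools.product and the totals are
# tallied, which is exactly the input to the Peter-vs-Colin win-probability sum.
def _sketch_distribution(sides: int, dice: int) -> list:
    freq = [0] * (sides * dice + 1)
    for roll in product(range(1, sides + 1), repeat=dice):
        freq[sum(roll)] += 1
    return freq

# Two 4-sided dice: a total of 5 can be rolled in exactly 4 ordered ways.
assert _sketch_distribution(4, 2)[5] == 4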
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
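# Rough sketch (not the real _LazyModule API) of the deferred-import pattern
# wired up above: attribute access resolves the owning submodule from the
# import structure and imports it on first use, so importing the package
# stays cheap until a class is actually requested.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        for submodule, names in self._import_structure.items():
            if item in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, item)
        raise AttributeError(item)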
| 343 | 1 |
from collections import deque
from .hash_table import HashTable
class lowercase ( UpperCamelCase__ ):
def __init__( self , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
def a__ ( self , _a , _a ) -> Union[str, Any]:
_A : int = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_a )
_A : Tuple = self.values[key]
def a__ ( self ) -> int:
return (
sum(self.charge_factor - len(_a ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def a__ ( self , _a , _a=None ) -> Dict:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_a ) == 0
):
return key
return super()._collision_resolution(_a , _a )
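# Minimal sketch of the separate-chaining behaviour above, assuming a plain
# dict of deques instead of the HashTable base class: colliding values for a
# slot share one deque and new entries go to the front, mirroring appendleft().
_chains = {}

def _sketch_insert(key, value, size=8):
    _chains.setdefault(key % size, deque()).appendleft(value)

_sketch_insert(3, "a")
_sketch_insert(11, "b")  # 11 % 8 == 3, so it collides with key 3
assert list(_chains[3]) == ["b", "a"]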
| 343 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase :
_a = 42
# setable values
_a = 42
_a = 42
_a = None
@classmethod
def a__ ( cls , _a , _a , _a ) -> Tuple:
return cls(common=_a , init_noise_sigma=_a , timesteps=_a )
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in FlaxKarrasDiffusionSchedulers]
_a = 42
@property
def a__ ( self ) -> Dict:
return True
@register_to_config
def __init__( self , _a = 1000 , _a = 0.0001 , _a = 0.02 , _a = "linear" , _a = None , _a = "fixed_small" , _a = True , _a = "epsilon" , _a = jnp.floataa , ) -> Tuple:
_A : Tuple = dtype
def a__ ( self , _a = None ) -> DDPMSchedulerState:
if common is None:
_A : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
_A : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_a , init_noise_sigma=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a = None ) -> jnp.ndarray:
return sample
def a__ ( self , _a , _a , _a = () ) -> DDPMSchedulerState:
_A : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_A : Dict = (jnp.arange(0 , _a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a=None , _a=None ) -> Optional[int]:
_A : Optional[Any] = state.common.alphas_cumprod[t]
_A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_A : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_A : Optional[Any] = jnp.clip(_a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_A : Any = jnp.log(jnp.clip(_a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_A : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A : str = variance
_A : Union[str, Any] = state.common.betas[t]
_A : Tuple = (predicted_variance + 1) / 2
_A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self , _a , _a , _a , _a , _a = None , _a = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A : Dict = timestep
if key is None:
_A : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_A , _A : List[str] = jnp.split(_a , sample.shape[1] , axis=1 )
else:
_A : int = None
# 1. compute alphas, betas
_A : int = state.common.alphas_cumprod[t]
_A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A : Union[str, Any] = 1 - alpha_prod_t
_A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A : Union[str, Any] = jnp.clip(_a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_A : Tuple = jax.random.split(_a , num=1 )
_A : Dict = jax.random.normal(_a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_a , _a , predicted_variance=_a ) ** 0.5) * noise
_A : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_a , state=_a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return add_noise_common(state.common , _a , _a , _a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return get_velocity_common(state.common , _a , _a , _a )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
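# Numeric sketch of the "fixed_small" posterior variance computed in the
# _get_variance step above (formula (7) of https://arxiv.org/pdf/2006.11239.pdf),
# assuming a linear beta schedule; plain numpy keeps the check independent of
# the flax/jax state objects.
import numpy as _np_sketch

_betas = _np_sketch.linspace(1e-4, 0.02, 1000)
_alphas_cumprod = _np_sketch.cumprod(1.0 - _betas)
_t = 500
_posterior_var = (1 - _alphas_cumprod[_t - 1]) / (1 - _alphas_cumprod[_t]) * _betas[_t]
assert 0.0 < _posterior_var < _betas[_t]  # strictly smaller than beta_t itself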
| 343 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "camembert"
def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , **_a , ) -> Any:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Dict = vocab_size
_A : str = hidden_size
_A : List[Any] = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Dict = hidden_act
_A : int = intermediate_size
_A : str = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Dict = max_position_embeddings
_A : Union[str, Any] = type_vocab_size
_A : Union[str, Any] = initializer_range
_A : Optional[int] = layer_norm_eps
_A : Optional[Any] = position_embedding_type
_A : Tuple = use_cache
_A : int = classifier_dropout
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
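# Quick sketch of what the inputs mapping above encodes for ONNX export:
# axis 0 of each input is the dynamic batch dimension and axis 1 the sequence
# length (with an extra choice axis for multiple-choice tasks).
_dynamic_axis = {0: "batch", 1: "sequence"}
_onnx_inputs = OrderedDict([("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis)])
assert _onnx_inputs["input_ids"][0] == "batch"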
| 343 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=0 ):
# Format the message.
if name is None:
_A : Union[str, Any] = None
else:
_A : Dict = """.""" * max(0,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
_A : Tuple = fmt.format(snake_case_ )
# Print and recurse (if needed).
if isinstance(snake_case_,snake_case_ ):
if msg is not None:
print(snake_case_ )
for k in val.keys():
recursive_print(snake_case_,val[k],spaces + 2 )
elif isinstance(snake_case_,torch.Tensor ):
print(snake_case_,""":""",val.size() )
else:
print(snake_case_,""":""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
_A : str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_A : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_A : Tuple = param.view(*snake_case_ )
_A : Any = param.transpose(0,2 )
_A : int = param.transpose(1,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_A : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_A : int = param.view(*snake_case_ )
_A : Any = param.transpose(0,1 ).contiguous()
_A : Optional[int] = param.view(*snake_case_ )
return param
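# Shape-level sketch of the checkpoint_version >= 2.0 branch above, assuming
# 2 heads, 3 splits (Q, K, V) and a head size of 4: the stored
# [num_heads * num_splits * hidden, :] layout is viewed, the head and split
# axes are swapped, and the tensor is flattened back for downstream reads.
_sketch_param = torch.arange(2 * 3 * 4 * 5).view(2 * 3 * 4, 5)
_sketch_out = _sketch_param.view(2, 3, 4, 5).transpose(0, 1).contiguous().view(2 * 3 * 4, 5)
assert _sketch_out.shape == _sketch_param.shape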
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# The converted output model.
_A : Any = {}
# old versions did not store training args
_A : str = input_state_dict.get("""args""",snake_case_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.floataa ) ).view(
1,1,snake_case_,snake_case_ )
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
_A : List[str] = torch.tensor(-1e4,dtype=torch.floataa )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the LM-head matrix tied to the word embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
def lowerCAmelCase_ ( ):
# Create the argument parser.
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""",action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""",type=snake_case_,help="""Path to the checkpoint file (.zip archive or direct .pt file)""",)
parser.add_argument(
"""--config_file""",default="""""",type=snake_case_,help="""An optional config json file describing the pre-trained model.""",)
_A : Optional[int] = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_A : Optional[Any] = convert_megatron_checkpoint(snake_case_,snake_case_,snake_case_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(snake_case_,snake_case_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_A : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_A : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_A : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_A : Optional[Any] = """gpt2"""
_A : List[str] = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = type(snake_case_ ).__name__
_A : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(snake_case_ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(snake_case_ )
# Store the state_dict to file.
_A : Union[str, Any] = os.path.join(snake_case_,"""pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(snake_case_,snake_case_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
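# Example invocation (hedged: the paths are placeholders, but the flags are
# exactly the ones defined by the argument parser above):
#
#   python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /path/to/checkpoint.zip
#
# Passing --config_file with a GPT-2 config JSON overrides the NVIDIA
# defaults spelled out in the conversion routine.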
| 343 | 1 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_snake_case = {"UserAgent": UserAgent().random}
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = script.contents[0]
_A : Tuple = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase :
def __init__( self , _a ) -> Union[str, Any]:
_A : Optional[int] = F'''https://www.instagram.com/{username}/'''
_A : Union[str, Any] = self.get_json()
def a__ ( self ) -> dict:
_A : Tuple = requests.get(self.url , headers=_a ).text
_A : Dict = BeautifulSoup(_a , """html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def a__ ( self ) -> str:
return self.user_data["username"]
@property
def a__ ( self ) -> str:
return self.user_data["full_name"]
@property
def a__ ( self ) -> str:
return self.user_data["biography"]
@property
def a__ ( self ) -> str:
return self.user_data["business_email"]
@property
def a__ ( self ) -> str:
return self.user_data["external_url"]
@property
def a__ ( self ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def a__ ( self ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def a__ ( self ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def a__ ( self ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def a__ ( self ) -> bool:
return self.user_data["is_verified"]
@property
def a__ ( self ) -> bool:
return self.user_data["is_private"]
def lowerCAmelCase_ ( snake_case_ = "github" ):
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
_A : Tuple = InstagramUser(snake_case_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data,snake_case_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 343 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(UpperCamelCase__ )
class lowercase :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
F'''There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.''' )
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
_A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
_A : Tuple = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_A : Tuple = sorted(_a , key=lambda _a : x[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ["input_ids", "attention_mask"]
| 343 | 1 |
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCAmelCase_ ( ):
_A : List[str] = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Union[str, Any] = g.get_repo("""huggingface/transformers""" )
_A : Optional[int] = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Union[str, Any] = sorted([comment for comment in issue.get_comments()],key=lambda snake_case_ : i.created_at,reverse=snake_case_ )
_A : Optional[Any] = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 343 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ ( self ) -> List[Any]:
_A : int = ort.SessionOptions()
_A : Any = False
return options
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_A : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_A : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_A : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = """A red cat sitting on a park bench"""
_A : Optional[Any] = np.random.RandomState(0 )
_A : Dict = pipe(
prompt=_a , image=_a , mask_image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_a , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 343 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ ):
create_state_space_tree(snake_case_,[],0,[0 for i in range(len(snake_case_ ) )] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,):
if index == len(snake_case_ ):
print(snake_case_ )
return
for i in range(len(snake_case_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_A : Optional[Any] = True
create_state_space_tree(snake_case_,snake_case_,index + 1,snake_case_ )
current_sequence.pop()
_A : str = False
_snake_case = [3, 1, 2, 4]
generate_all_permutations(sequence)
_snake_case = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 343 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = filter(lambda snake_case_ : p.requires_grad,model.parameters() )
_A : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if metric == "rouge2":
_A : Optional[int] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_A : Dict = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_A : List[str] = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
""" function.""" )
_A : Optional[int] = ModelCheckpoint(
dirpath=snake_case_,filename=snake_case_,monitor=f'''val_{metric}''',mode="""max""",save_top_k=3,every_n_epochs=1,)
return checkpoint_callback
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return EarlyStopping(
monitor=f'''val_{metric}''',mode="""min""" if """loss""" in metric else """max""",patience=snake_case_,verbose=snake_case_,)
class lowercase ( pl.Callback ):
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[Any] = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_a )
@rank_zero_only
def a__ ( self , _a , _a , _a , _a=True ) -> None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
_A : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_A : Dict = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A : List[Any] = od / """test_results.txt"""
_A : List[Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A : Optional[int] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
_A : int = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_a )
generations_file.parent.mkdir(exist_ok=_a )
with open(_a , """a+""" ) as writer:
for key in sorted(_a ):
if key in ["log", "progress_bar", "preds"]:
continue
_A : List[Any] = metrics[key]
if isinstance(_a , torch.Tensor ):
_A : str = val.item()
_A : str = F'''{key}: {val:.6f}\n'''
writer.write(_a )
if not save_generations:
return
if "preds" in metrics:
_A : List[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_a )
@rank_zero_only
def a__ ( self , _a , _a ) -> str:
try:
_A : int = pl_module.model.model.num_parameters()
except AttributeError:
_A : str = pl_module.model.num_parameters()
_A : Optional[int] = count_trainable_parameters(_a )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self , _a , _a ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_a , _a , """test""" )
@rank_zero_only
def a__ ( self , _a , _a ) -> Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
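# Sketch of the filename templating handed to ModelCheckpoint above, assuming
# the "rouge2" metric: pytorch_lightning fills the placeholders from the
# logged metrics at save time, so a checkpoint name embeds the score and step.
_template = "{val_avg_rouge2:.4f}-{step_count}"
assert _template.format(val_avg_rouge2=0.5, step_count=7) == "0.5000-7"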
| 343 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_snake_case = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
_snake_case = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowerCAmelCase_ ( snake_case_ ):
if "://" in dataset_path:
_A : List[str] = dataset_path.split("""://""" )[1]
return dataset_path
def lowerCAmelCase_ ( snake_case_ ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : int = not is_remote_filesystem(snake_case_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(snake_case_ ),fs._strip_protocol(snake_case_ ) )
else:
fs.mv(snake_case_,snake_case_,recursive=snake_case_ )
def lowerCAmelCase_ ( ):
if hasattr(fsspec.asyn,"""reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_A : Dict = None
_A : Optional[int] = None
_A : Tuple = threading.Lock()
| 343 |
from __future__ import annotations
from collections.abc import Callable
_snake_case = list[list[float | int]]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
_A : int
_A : int
_A : int
_A : int
_A : int
_A : float
for row in range(snake_case_ ):
for col in range(snake_case_ ):
_A : Dict = matrix[row][col]
_A : List[Any] = vector[row][0]
_A : List[Any] = 0
_A : Optional[Any] = 0
while row < size and col < size:
# pivoting
_A : Any = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_,snake_case_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_A , _A : Optional[Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1,snake_case_ ):
_A : str = augmented[rowa][col] / augmented[row][col]
_A : List[Any] = 0
for cola in range(col + 1,size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1,snake_case_ ):
for row in range(snake_case_ ):
_A : int = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_,size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row],10 )] for row in range(snake_case_ )
]
def lowerCAmelCase_ ( snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
_A : Matrix = [[0] for _ in range(snake_case_ )]
_A : Matrix
_A : int
_A : int
_A : int
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
_A : str = (x_val + 1) ** (size - col - 1)
_A : List[str] = y_val
_A : Any = solve(snake_case_,snake_case_ )
def interpolated_func(snake_case_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
def lowerCAmelCase_ ( snake_case_ ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCAmelCase_ ( snake_case_ = question_function,snake_case_ = 10 ):
_A : list[int] = [func(snake_case_ ) for x_val in range(1,order + 1 )]
_A : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1,order + 1 )
]
_A : int = 0
_A : Callable[[int], int]
_A : int
for poly in polynomials:
_A : Optional[int] = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 1 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_A : Any = TapasConfig.from_json_file(snake_case_ )
# set absolute/relative position embeddings parameter
_A : List[str] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_A : List[str] = TapasForQuestionAnswering(config=snake_case_ )
elif task == "WTQ":
# run_task_main.py hparams
_A : str = 4
_A : Tuple = True
# hparam_utils.py hparams
_A : List[str] = 0.66_46_94
_A : int = 0.20_79_51
_A : Tuple = 0.12_11_94
_A : Tuple = True
_A : int = True
_A : List[Any] = False
_A : List[str] = 0.0_35_25_13
_A : List[str] = TapasForQuestionAnswering(config=snake_case_ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_A : Any = 4
_A : Any = False
# hparam_utils.py hparams
_A : int = 36.45_19
_A : Tuple = 0.90_34_21
_A : Dict = 2_22.0_88
_A : Dict = True
_A : Any = True
_A : List[str] = True
_A : List[str] = 0.76_31_41
_A : Dict = TapasForQuestionAnswering(config=snake_case_ )
elif task == "TABFACT":
_A : Tuple = TapasForSequenceClassification(config=snake_case_ )
elif task == "MLM":
_A : Dict = TapasForMaskedLM(config=snake_case_ )
elif task == "INTERMEDIATE_PRETRAINING":
_A : Optional[int] = TapasModel(config=snake_case_ )
else:
raise ValueError(f'''Task {task} not supported.''' )
print(f'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(snake_case_,snake_case_,snake_case_ )
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(snake_case_ )
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''' )
_A : List[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""",model_max_length=512 )
tokenizer.save_pretrained(snake_case_ )
print("""Used relative position embeddings:""",model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
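# --- Illustrative CLI usage (added, hedged) ---
# The script filename follows the usual transformers naming convention and all
# paths are placeholders. Note that the tokenizer step above derives the vocab
# path via tf_checkpoint_path[:-10], so the checkpoint path is expected to end
# in "model.ckpt".
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output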
| 343 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
_snake_case = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowerCAmelCase_ ( snake_case_ = "mumbai" ):
_A : Optional[Any] = BeautifulSoup(requests.get(url + location ).content,"""html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""",attrs={"""data-tn-component""": """organicJob"""} ):
_A : Tuple = job.find("""a""",attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
_A : Optional[int] = job.find("""span""",{"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 343 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER","False" ) ) is not True,reason="Skipping test because it should only be run when releasing minor transformers version",)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=_a , )
assert hasattr(self , """env""" )
def a__ ( self , _a=1 ) -> Dict:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def a__ ( self , _a ) -> Any:
TrainingJobAnalytics(_a ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
def a__ ( self ) -> str:
# create estimator
_A : str = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_A : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_A : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
_A : str = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _a )
| 343 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(snake_case_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_A : List[Any] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
_A : Tuple = [[0.0, 0.0], [0.0, 0.0]]
_A , _A : List[str] = matrix[1][1], matrix[0][0]
_A , _A : List[str] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(snake_case_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(snake_case_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_A : List[str] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
_A : List[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_A : Union[str, Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_A : Optional[Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_A : List[Any] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_A : int = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_A : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_A : List[str] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_A : Optional[int] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_A : List[Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
_A : List[str] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_A : Union[str, Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(snake_case_ )
# Calculate the inverse of the matrix
return [[float(d(snake_case_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 343 | 1 |
import numpy as np
def lowerCAmelCase_ ( snake_case_ ):
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_ ( snake_case_ ):
return vector * sigmoid(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
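# --- Illustrative doctest-style check (added, hedged) ---
# The second function is the sigmoid linear unit (SiLU/swish), x * sigmoid(x),
# so it vanishes at 0; the names sigmoid / sigmoid_linear_unit are assumed:
# >>> import numpy as np
# >>> sigmoid(np.array([0.0]))
# array([0.5])
# >>> sigmoid_linear_unit(np.array([0.0]))
# array([0.])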
| 343 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
@register_to_config
def __init__( self , _a = 32 , _a = 64 , _a = 20 , _a = 768 , _a=77 , _a=4 , _a = 0.0 , _a = "silu" , _a = None , _a = None , _a = "linear" , _a = "prd" , _a = None , _a = None , _a = None , ) -> Any:
super().__init__()
_A : int = num_attention_heads
_A : Union[str, Any] = attention_head_dim
_A : Tuple = num_attention_heads * attention_head_dim
_A : Any = additional_embeddings
_A : Any = time_embed_dim or inner_dim
_A : List[str] = embedding_proj_dim or embedding_dim
_A : Optional[int] = clip_embed_dim or embedding_dim
_A : Union[str, Any] = Timesteps(_a , _a , 0 )
_A : str = TimestepEmbedding(_a , _a , out_dim=_a , act_fn=_a )
_A : Dict = nn.Linear(_a , _a )
if embedding_proj_norm_type is None:
_A : int = None
elif embedding_proj_norm_type == "layer":
_A : Optional[Any] = nn.LayerNorm(_a )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
_A : Optional[Any] = nn.Linear(_a , _a )
if encoder_hid_proj_type is None:
_A : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
_A : Tuple = nn.Linear(_a , _a )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
_A : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _a ) )
if added_emb_type == "prd":
_A : str = nn.Parameter(torch.zeros(1 , 1 , _a ) )
elif added_emb_type is None:
_A : Union[str, Any] = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
_A : int = nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , activation_fn="""gelu""" , attention_bias=_a , )
for d in range(_a )
] )
if norm_in_type == "layer":
_A : Union[str, Any] = nn.LayerNorm(_a )
elif norm_in_type is None:
_A : Tuple = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
_A : int = nn.LayerNorm(_a )
_A : str = nn.Linear(_a , _a )
_A : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
_A : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _a , persistent=_a )
_A : Tuple = nn.Parameter(torch.zeros(1 , _a ) )
_A : Dict = nn.Parameter(torch.zeros(1 , _a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
_A : List[str] = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
_A : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def a__ ( self , _a ) -> List[str]:
_A : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def a__ ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
def a__ ( self , _a , _a , _a , _a = None , _a = None , _a = True , ) -> Optional[Any]:
_A : Tuple = hidden_states.shape[0]
_A : List[Any] = timestep
if not torch.is_tensor(_a ):
_A : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
_A : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_A : Optional[int] = timesteps * torch.ones(_a , dtype=timesteps.dtype , device=timesteps.device )
_A : Dict = self.time_proj(_a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_A : Tuple = timesteps_projected.to(dtype=self.dtype )
_A : List[Any] = self.time_embedding(_a )
if self.embedding_proj_norm is not None:
_A : Dict = self.embedding_proj_norm(_a )
_A : List[Any] = self.embedding_proj(_a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_A : List[Any] = self.encoder_hidden_states_proj(_a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
_A : Optional[int] = self.proj_in(_a )
_A : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
_A : Union[str, Any] = []
_A : List[str] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_A : List[str] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_A : List[str] = hidden_states[:, None, :]
_A : Dict = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_A : Optional[int] = self.prd_embedding.to(hidden_states.dtype ).expand(_a , -1 , -1 )
additional_embeds.append(_a )
_A : str = torch.cat(
_a , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_A : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_A : Union[str, Any] = F.pad(
_a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_A : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_A : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
_A : List[Any] = F.pad(_a , (0, self.additional_embeddings) , value=0.0 )
_A : Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_A : int = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_A : str = self.norm_in(_a )
for block in self.transformer_blocks:
_A : List[Any] = block(_a , attention_mask=_a )
_A : Any = self.norm_out(_a )
if self.prd_embedding is not None:
_A : int = hidden_states[:, -1]
else:
_A : Any = hidden_states[:, additional_embeddings_len:]
_A : Union[str, Any] = self.proj_to_clip_embeddings(_a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_a )
def a__ ( self , _a ) -> Tuple:
_A : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
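# --- Illustrative instantiation sketch (added, hedged) ---
# A deliberately tiny, hypothetical configuration of the prior transformer
# above (the class name PriorTransformer is inferred from the
# PriorTransformerOutput it returns); the values are not taken from any
# released checkpoint, and keyword names assume the upstream signature.
# import torch
# prior = PriorTransformer(
#     num_attention_heads=2, attention_head_dim=4, num_layers=2,
#     embedding_dim=8, num_embeddings=7, additional_embeddings=4,
# )
# out = prior(
#     torch.randn(1, 8),                         # hidden_states
#     timestep=1,
#     proj_embedding=torch.randn(1, 8),
#     encoder_hidden_states=torch.randn(1, 7, 8),
# )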
| 343 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Any = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A : Union[str, Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[str] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : int = None
if token is not None:
_A : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    _A : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
_A : Optional[Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[Any] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Dict = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Tuple = requests.get(snake_case_,headers=snake_case_,allow_redirects=snake_case_ )
_A : Tuple = result.headers["""Location"""]
_A : Union[str, Any] = requests.get(snake_case_,allow_redirects=snake_case_ )
_A : Dict = os.path.join(snake_case_,f'''{artifact_name}.zip''' )
with open(snake_case_,"""wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : List[str] = []
_A : int = []
_A : Tuple = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
_A : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A : Dict = line[: line.index(""": """ )]
_A : Dict = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_A : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
_A : Optional[int] = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
f'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
_A : Any = None
if job_name and job_links:
_A : Dict = job_links.get(snake_case_,snake_case_ )
# A list with elements of the form (line of error, error, failed test)
_A : Optional[int] = [x + [y] + [job_link] for x, y in zip(snake_case_,snake_case_ )]
return result
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = []
_A : Optional[int] = [os.path.join(snake_case_,snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_,job_links=snake_case_ ) )
return errors
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = Counter()
counter.update([x[1] for x in logs] )
_A : Tuple = counter.most_common()
_A : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A : str = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_A : Dict = test.split("""/""" )[2]
else:
_A : str = None
return test
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A : Union[str, Any] = [x for x in logs if x[2] is not None]
_A : Optional[Any] = {x[2] for x in logs}
_A : List[Any] = {}
for test in tests:
_A : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A : Union[str, Any] = counter.most_common()
_A : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A : str = sum(error_counts.values() )
if n_errors > 0:
_A : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = """| no. | error | status |"""
_A : List[Any] = """|-:|:-|:-|"""
_A : List[Any] = [header, sep]
for error in reduced_by_error:
_A : List[str] = reduced_by_error[error]["""count"""]
_A : List[Any] = f'''| {count} | {error[:100]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = """| model | no. of errors | major error | count |"""
_A : Optional[Any] = """|-:|-:|-:|-:|"""
_A : Union[str, Any] = [header, sep]
for model in reduced_by_model:
_A : Dict = reduced_by_model[model]["""count"""]
_A , _A : str = list(reduced_by_model[model]["""errors"""].items() )[0]
_A : Union[str, Any] = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_snake_case = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case = get_job_links(args.workflow_run_id, token=args.token)
_snake_case = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case = k.find(" / ")
_snake_case = k[index + len(" / ") :]
_snake_case = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case = reduce_by_error(errors)
_snake_case = reduce_by_model(errors)
_snake_case = make_github_table(reduced_by_error)
_snake_case = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
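# --- Illustrative CLI usage (added, hedged) ---
# The script filename and token are placeholders; the run id comes from the
# GitHub Actions workflow URL.
# python get_ci_error_statistics.py \
#     --workflow_run_id 1234567890 \
#     --output_dir ./ci_reports \
#     --token "$GITHUB_TOKEN"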
| 343 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_snake_case = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=None ):
if rng is None:
_A : List[str] = random.Random()
_A : Optional[Any] = 1
for dim in shape:
total_dims *= dim
_A : Optional[Any] = []
for _ in range(snake_case_ ):
values.append(rng.randint(0,vocab_size - 1 ) )
_A : int = np.array(snake_case_,dtype=jnp.intaa ).reshape(snake_case_ )
return output
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : str = ids_tensor(snake_case_,vocab_size=2,rng=snake_case_ )
# make sure that at least one token is attended to for each batch
_A : Optional[int] = 1
return attn_mask
@require_flax
class lowercase :
_a = None
_a = ()
def a__ ( self ) -> Optional[int]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 2
_A : int = 2
_A : Dict = inputs["""input_ids"""].shape[-1] // 2
_A : Optional[int] = inputs["""input_ids"""][:max_batch_size, :sequence_length]
_A : str = jnp.ones_like(_a )
_A : Any = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_A : Union[str, Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_A : Optional[Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__ ( self ) -> Optional[Any]:
_A , _A , _A , _A : Optional[int] = self._get_input_ids_and_config()
_A : int = False
_A : Any = max_length
_A : Optional[Any] = 0
for model_class in self.all_generative_model_classes:
_A : Optional[int] = model_class(_a )
_A : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
_A : Dict = getattr(_a , _a )
_A : List[str] = pt_model_class(_a ).eval()
_A : Tuple = load_flax_weights_in_pytorch_model(_a , flax_model.params )
_A : Optional[int] = flax_model.generate(_a ).sequences
_A : str = pt_model.generate(torch.tensor(_a , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_A : Dict = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def a__ ( self ) -> Union[str, Any]:
_A , _A , _A , _A : Tuple = self._get_input_ids_and_config()
_A : Dict = False
_A : Any = max_length
for model_class in self.all_generative_model_classes:
_A : Optional[Any] = model_class(_a )
_A : Optional[Any] = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : List[Any] = jit(model.generate )
_A : Union[str, Any] = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> Union[str, Any]:
_A , _A , _A , _A : Tuple = self._get_input_ids_and_config()
_A : List[str] = True
_A : List[Any] = max_length
for model_class in self.all_generative_model_classes:
_A : Union[str, Any] = model_class(_a )
_A : Dict = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : str = jit(model.generate )
_A : int = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> Optional[int]:
_A , _A , _A , _A : Optional[int] = self._get_input_ids_and_config()
_A : List[Any] = False
_A : str = max_length
_A : List[Any] = 2
for model_class in self.all_generative_model_classes:
_A : Dict = model_class(_a )
_A : Dict = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : Dict = jit(model.generate )
_A : List[Any] = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> List[Any]:
_A , _A , _A , _A : List[str] = self._get_input_ids_and_config()
_A : int = False
_A : Optional[Any] = max_length
_A : Optional[Any] = 2
_A : Optional[Any] = 2
for model_class in self.all_generative_model_classes:
_A : Dict = model_class(_a )
_A : Any = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def a__ ( self ) -> List[str]:
_A , _A , _A , _A : Optional[Any] = self._get_input_ids_and_config()
_A : List[Any] = True
_A : List[Any] = max_length
_A : List[str] = 0.8
_A : Optional[Any] = 10
_A : List[str] = 0.3
_A : Optional[Any] = 1
_A : Any = 8
_A : Tuple = 9
for model_class in self.all_generative_model_classes:
_A : Tuple = model_class(_a )
_A : int = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : Optional[Any] = jit(model.generate )
_A : str = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> List[str]:
_A , _A , _A , _A : Optional[int] = self._get_input_ids_and_config()
_A : Union[str, Any] = max_length
_A : List[Any] = 1
_A : Optional[Any] = 8
_A : Any = 9
for model_class in self.all_generative_model_classes:
_A : Any = model_class(_a )
_A : int = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : int = jit(model.generate )
_A : List[Any] = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> List[str]:
_A , _A , _A , _A : str = self._get_input_ids_and_config()
_A : Any = max_length
_A : Dict = 2
_A : Tuple = 1
_A : List[Any] = 8
_A : Tuple = 9
for model_class in self.all_generative_model_classes:
_A : int = model_class(_a )
_A : Tuple = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : int = jit(model.generate )
_A : int = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> Any:
_A , _A , _A , _A : int = self._get_input_ids_and_config()
# pad attention mask on the left
_A : List[Any] = attention_mask.at[(0, 0)].set(0 )
_A : Dict = False
_A : int = max_length
for model_class in self.all_generative_model_classes:
_A : Optional[int] = model_class(_a )
_A : List[str] = model.generate(_a , attention_mask=_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : List[str] = jit(model.generate )
_A : str = jit_generate(_a , attention_mask=_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> Optional[Any]:
_A , _A , _A , _A : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
_A : List[str] = attention_mask.at[(0, 0)].set(0 )
_A : Optional[Any] = True
_A : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_A : Dict = model_class(_a )
_A : Tuple = model.generate(_a , attention_mask=_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : List[Any] = jit(model.generate )
_A : Any = jit_generate(_a , attention_mask=_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> str:
_A , _A , _A , _A : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_A : List[str] = attention_mask.at[(0, 0)].set(0 )
_A : str = 2
_A : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
_A : Optional[Any] = model_class(_a )
_A : Optional[Any] = model.generate(_a , attention_mask=_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : Any = jit(model.generate )
_A : Any = jit_generate(_a , attention_mask=_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
_A : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
_A : List[Any] = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_A : Tuple = """Hello world"""
_A : List[Any] = tokenizer(_a , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_a , """do_samples""" ):
model.generate(_a , do_samples=_a )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_a , """foo""" ):
_A : Union[str, Any] = {"""foo""": """bar"""}
model.generate(_a , **_a )
| 343 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
debug_launcher(test_script.main )
def a__ ( self ) -> Any:
debug_launcher(test_ops.main )
| 343 | 1 |
from __future__ import annotations
from collections.abc import Callable
_snake_case = list[list[float | int]]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
_A : int
_A : int
_A : int
_A : int
_A : int
_A : float
for row in range(snake_case_ ):
for col in range(snake_case_ ):
_A : Dict = matrix[row][col]
_A : List[Any] = vector[row][0]
_A : List[Any] = 0
_A : Optional[Any] = 0
while row < size and col < size:
# pivoting
_A : Any = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_,snake_case_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_A , _A : Optional[Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1,snake_case_ ):
_A : str = augmented[rowa][col] / augmented[row][col]
_A : List[Any] = 0
for cola in range(col + 1,size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1,snake_case_ ):
for row in range(snake_case_ ):
_A : int = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_,size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row],10 )] for row in range(snake_case_ )
]
def lowerCAmelCase_ ( snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
_A : Matrix = [[0] for _ in range(snake_case_ )]
_A : Matrix
_A : int
_A : int
_A : int
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
_A : str = (x_val + 1) ** (size - col - 1)
_A : List[str] = y_val
_A : Any = solve(snake_case_,snake_case_ )
def interpolated_func(snake_case_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
def lowerCAmelCase_ ( snake_case_ ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCAmelCase_ ( snake_case_ = question_function,snake_case_ = 10 ):
_A : list[int] = [func(snake_case_ ) for x_val in range(1,order + 1 )]
_A : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1,order + 1 )
]
_A : int = 0
_A : Callable[[int], int]
_A : int
for poly in polynomials:
_A : Optional[int] = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = "resnet"
_a = ["basic", "bottleneck"]
def __init__( self , _a=3 , _a=64 , _a=[256, 512, 1024, 2048] , _a=[3, 4, 6, 3] , _a="bottleneck" , _a="relu" , _a=False , _a=None , _a=None , **_a , ) -> int:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
_A : Optional[Any] = num_channels
_A : List[Any] = embedding_size
_A : int = hidden_sizes
_A : Union[str, Any] = depths
_A : Optional[int] = layer_type
_A : Any = hidden_act
_A : List[Any] = downsample_in_first_stage
_A : int = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
_A , _A : str = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class lowercase ( UpperCamelCase__ ):
_a = version.parse("1.11" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self ) -> float:
return 1e-3
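# --- Illustrative configuration sketch (added, hedged) ---
# Assuming the upstream class name ResNetConfig for the first class above; a
# reduced two-stage variant suitable for quick tests:
# >>> config = ResNetConfig(depths=[2, 2], hidden_sizes=[32, 64], layer_type="basic")
# >>> config.stage_names
# ['stem', 'stage1', 'stage2']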
| 343 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Union[str, Any] = state_dict.pop(snake_case_ )
_A : Dict = val
def lowerCAmelCase_ ( snake_case_ ):
_A : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_A : List[str] = key.replace("""backbone.0.body""","""backbone.conv_encoder.model""" )
_A : Optional[int] = value
else:
_A : Any = value
return new_state_dict
def lowerCAmelCase_ ( snake_case_,snake_case_=False ):
_A : Dict = """"""
if is_panoptic:
_A : List[Any] = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_A : Tuple = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_A : List[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_A : Optional[Any] = in_proj_weight[:256, :]
_A : List[str] = in_proj_bias[:256]
_A : Optional[Any] = in_proj_weight[256:512, :]
_A : Tuple = in_proj_bias[256:512]
_A : Dict = in_proj_weight[-256:, :]
_A : Optional[Any] = in_proj_bias[-256:]
def lowerCAmelCase_ ( ):
_A : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_A : Tuple = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A : Union[str, Any] = """resnet101"""
if "dc5" in model_name:
_A : Optional[int] = True
_A : Union[str, Any] = """panoptic""" in model_name
if is_panoptic:
_A : Tuple = 250
else:
_A : Optional[Any] = 91
_A : List[str] = """huggingface/label-files"""
_A : Union[str, Any] = """coco-detection-id2label.json"""
_A : Optional[int] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
_A : List[str] = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : Tuple = idalabel
_A : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
_A : Optional[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
_A : int = ConditionalDetrImageProcessor(format=snake_case_ )
# prepare image
_A : Any = prepare_img()
_A : List[Any] = image_processor(images=snake_case_,return_tensors="""pt""" )
_A : List[str] = encoding["""pixel_values"""]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
_A : Dict = torch.hub.load("""DeppMeng/ConditionalDETR""",snake_case_,pretrained=snake_case_ ).eval()
_A : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A : Dict = """conditional_detr.""" + src
rename_key(snake_case_,snake_case_,snake_case_ )
_A : Dict = rename_backbone_keys(snake_case_ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case_,is_panoptic=snake_case_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A : str = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
_A : str = state_dict.pop(snake_case_ )
_A : str = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A : Union[str, Any] = state_dict.pop(snake_case_ )
_A : Optional[Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
_A : str = state_dict.pop(snake_case_ )
_A : Optional[int] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_A : Union[str, Any] = state_dict.pop(snake_case_ )
_A : int = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
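# Hypothetical invocation sketch (the script filename and the dump path are assumptions,
# not taken from this file):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50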
| 343 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# Load checkpoint
_A : Optional[int] = torch.load(snake_case_,map_location="""cpu""" )
_A : Any = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
_A : Any = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_A : Tuple = v
else:
_A : Dict = v
_A : Optional[Any] = chkpt["""params"""]
_A : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(snake_case_,(torch.FloatTensor, numpy.ndarray) )}
_A : str = chkpt["""dico_word2id"""]
_A : Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""","""""" ): i for s, i in vocab.items()}
# Save pytorch-model
_A : Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_A : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_A : Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(snake_case_,snake_case_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
print(f'''Save vocab file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 343 | 1 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples, out_file, model_name, batch_size=8, device=DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs,
):
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
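    # Usage for summarization (a sketch mirroring the MT example above; the model id and paths are placeholders):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization $@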
run_generate(verbose=True)
| 343 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
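# Hypothetical usage sketch (the checkpoint id and the `image` variable are assumptions,
# not referenced anywhere in this file):
# from transformers import BlipProcessor
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photography of", return_tensors="pt")  # pixel_values + input_ids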
| 343 | 1 |
def lowerCAmelCase_(string_a, string_b):
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
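    # Quick sanity check with the classic Hamming-distance pair (inputs chosen here for illustration):
    # "karolin" vs "kathrin" differ at three positions.
    assert lowerCAmelCase_("karolin", "kathrin") == 3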
| 343 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
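# Deterministic sanity check on a tiny hand-picked list (illustrative, separate from the
# random demo below): the sort happens in place and the return value counts comparisons.
_demo = [3, 1, 2]
_in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert _demo == [1, 2, 3]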
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)


outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 343 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class lowercase(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self) -> float:
return 1e-3
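# Hypothetical usage sketch (upstream this pair is ResNetConfig/ResNetOnnxConfig; both classes
# are renamed to `lowercase` in this file, so the names below are assumptions):
# config = ResNetConfig(depths=[2, 2, 2, 2], out_features=["stage1", "stage4"])
# A backbone built from such a config returns feature maps only after the listed stages.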
| 343 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class lowercase(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
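# Back-of-envelope shape note derived from the defaults above (an illustration, not from this file):
# frequency patches = (128 - 16) // 10 + 1 = 12, time patches = (1024 - 16) // 10 + 1 = 101,
# i.e. 12 * 101 = 1212 patches, plus the two prepended tokens AST uses -> 1214 input embeddings.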
| 343 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowercase(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def a__ ( self ) -> int:
pass # TODO add if relevant
def a__ ( self ) -> List[str]:
pass # TODO add if relevant
def a__ ( self ) -> Optional[Any]:
pass # TODO add if relevant
def a__ ( self ) -> Optional[int]:
_A : int = self.tokenizer_class(self.vocab_file )
_A : List[str] = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(_a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(_a )
_A : Tuple = """こんにちは、世界。\nこんばんは、世界。"""
_A : Union[str, Any] = tokenizer.tokenize(_a )
self.assertListEqual(_a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_A : str = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(_a , """wb""" ) as handle:
pickle.dump(_a , _a )
with open(_a , """rb""" ) as handle:
_A : List[str] = pickle.load(_a )
_A : str = tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[Any]:
_A : str = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def a__ ( self ) -> Tuple:
try:
_A : str = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def a__ ( self ) -> List[Any]:
try:
_A : Any = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def a__ ( self ) -> Optional[int]:
_A : List[Any] = MecabTokenizer(do_lower_case=_a , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def a__ ( self ) -> Optional[Any]:
try:
_A : List[Any] = MecabTokenizer(
do_lower_case=_a , normalize_text=_a , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def a__ ( self ) -> Optional[Any]:
_A : Dict = MecabTokenizer(normalize_text=_a , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def a__ ( self ) -> List[str]:
_A : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(_a )
_A : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
_A : Dict = tokenizer.tokenize(_a )
self.assertListEqual(_a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_A : List[str] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(_a , """wb""" ) as handle:
pickle.dump(_a , _a )
with open(_a , """rb""" ) as handle:
_A : Any = pickle.load(_a )
_A : int = tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
@require_sudachi
def a__ ( self ) -> Optional[int]:
_A : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def a__ ( self ) -> List[Any]:
_A : int = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def a__ ( self ) -> Tuple:
_A : str = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def a__ ( self ) -> Optional[Any]:
_A : str = SudachiTokenizer(do_lower_case=_a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def a__ ( self ) -> Optional[int]:
_A : Tuple = SudachiTokenizer(normalize_text=_a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def a__ ( self ) -> List[str]:
_A : List[Any] = SudachiTokenizer(trim_whitespace=_a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> Tuple:
_A : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(_a )
_A : List[Any] = """こんにちは、世界。\nこんばんは、世界。"""
_A : str = tokenizer.tokenize(_a )
self.assertListEqual(_a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_A : Optional[Any] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(_a , """wb""" ) as handle:
pickle.dump(_a , _a )
with open(_a , """rb""" ) as handle:
_A : Any = pickle.load(_a )
_A : Dict = tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
@require_jumanpp
def a__ ( self ) -> Optional[Any]:
_A : List[str] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = JumanppTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> List[str]:
_A : str = JumanppTokenizer(normalize_text=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = JumanppTokenizer(trim_whitespace=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> str:
_A : Tuple = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def a__ ( self ) -> Optional[Any]:
_A : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
_A : int = {}
for i, token in enumerate(_a ):
_A : List[Any] = i
_A : Any = WordpieceTokenizer(vocab=_a , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def a__ ( self ) -> List[Any]:
_A : Tuple = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
_A : str = tokenizer.subword_tokenizer
_A : Any = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(_a , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
_A : Dict = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(_a , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def a__ ( self ) -> Dict:
_A : str = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
_A : Optional[Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=_a )
_A : str = tokenizer.encode("""どういたしまして。""" , add_special_tokens=_a )
_A : List[Any] = tokenizer.build_inputs_with_special_tokens(_a )
_A : Tuple = tokenizer.build_inputs_with_special_tokens(_a , _a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def a__ ( self ) -> Tuple:
pass # TODO add if relevant
def a__ ( self ) -> Tuple:
pass # TODO add if relevant
def a__ ( self ) -> str:
pass # TODO add if relevant
def a__ ( self ) -> Any:
_A : List[str] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
_A : List[str] = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
_a , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def a__ ( self ) -> Tuple:
_A : List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_A : Dict = {}
for i, token in enumerate(_a ):
_A : Tuple = i
_A : int = CharacterTokenizer(vocab=_a , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def a__ ( self ) -> Dict:
_A : int = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
_A : int = tokenizer.encode("""ありがとう。""" , add_special_tokens=_a )
_A : Optional[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=_a )
_A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_a )
_A : int = tokenizer.build_inputs_with_special_tokens(_a , _a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = """cl-tohoku/bert-base-japanese"""
_A : List[str] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Optional[int]:
_A : Dict = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(_a )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
_A : str = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(_a )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 343 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def lowerCAmelCase_():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class lowercase(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)
        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)
        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 343 | 1 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
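# Usage sketch: split 12 layers evenly across 4 devices (3 consecutive layers per device).
assert get_device_map(12, [0, 1, 2, 3]) == {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}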
| 343 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 343 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowercase ( unittest.TestCase ):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
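# Hypothetical usage sketch outside these tests: criteria compose into one callable that a
# generation loop can poll after every decoding step.
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
# while not criteria(input_ids, scores):
#     ...  # append one more token to input_ids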
| 343 |
def solution(n=1000):
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:  # note: unreachable, since a % 15 == 0 implies a % 3 == 0 above
            result -= a
        a += 1
    return result


if __name__ == "__main__":
    print(f"""{solution() = }""")
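    # Worked example: below 10 the qualifying terms (starting from a = 3) are 3, 5, 6 and 9,
    # so the sum is 23.
    assert solution(10) == 23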
| 343 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_snake_case = "sshleifer/student_marian_en_ro_6_1"
_snake_case = "sshleifer/tiny-mbart"
@require_torch
class lowercase(TestCasePlus):
    def run_seqaseq_quick(
        self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )

        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def a__ ( self ) -> Optional[int]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def a__ ( self ) -> Union[str, Any]:
        self.run_seqaseq_quick(distributed=False)
@require_torch_multi_gpu
def a__ ( self ) -> int:
        self.run_seqaseq_quick(distributed=True)
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def a__ ( self ) -> List[Any]:
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def a__ ( self ) -> Optional[Any]:
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def a__ ( self ) -> Optional[int]:
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def a__ ( self ) -> str:
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False)
@require_apex
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
def a__ ( self ) -> Union[str, Any]:
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def a__ ( self ) -> Dict:
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB", )

        self.assertGreater(
            gpu_total_mem_diff, expected_savings, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB", )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
    def run_trainer(
        self, max_len, model_name, num_train_epochs, learning_rate=3e-3, optim="adafactor", distributed=False, extra_args_str=None, eval_steps=0, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, n_gpus_to_use=None, ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
| 343 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
def a__ ( self ) -> str:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.num_labels )
_A : str = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> int:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> List[Any]:
_A : Union[str, Any] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> str:
_A : List[str] = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Optional[Any] = None
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> int:
_A : int = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
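# --- illustrative sketch, not part of the tester above ---
# ConvNext downsamples by 4 in its patchify stem and by 2 in each later stage,
# so with the defaults above (image_size=32, four stages) the feature maps are
# 8x8, 4x4, 2x2 and 1x1 -- which is why create_and_check_model expects
# image_size // 32 and create_and_check_backbone sees 4x4 maps at stage2.
def _convnext_stage_sizes(image_size, num_stages=4):
    sizes, current = [], image_size // 4  # stem stride 4
    for stage in range(num_stages):
        sizes.append(current)
        if stage < num_stages - 1:
            current //= 2  # every subsequent stage halves the resolution
    return sizes

assert _convnext_stage_sizes(32) == [8, 4, 2, 1]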
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : int = ConvNextModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> List[str]:
_A : Optional[int] = ConvNextModelTester(self )
| 343 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# Initialise PyTorch model
_A : str = BertConfig.from_json_file(snake_case_ )
print(f'''Building PyTorch model from configuration: {config}''' )
_A : Optional[int] = BertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(snake_case_,snake_case_,snake_case_ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(),snake_case_ )
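# --- illustrative sketch, not part of the conversion script ---
# torch.save(model.state_dict(), path) above stores only the weights; reloading
# means rebuilding the module and calling load_state_dict. Round trip with a
# toy module and an in-memory buffer:
import io

def _state_dict_round_trip():
    src = torch.nn.Linear(2, 2)
    buf = io.BytesIO()
    torch.save(src.state_dict(), buf)  # same call as above, in-memory target
    buf.seek(0)
    dst = torch.nn.Linear(2, 2)
    dst.load_state_dict(torch.load(buf))
    assert torch.equal(src.weight, dst.weight)

_state_dict_round_trip()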
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 343 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
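# --- illustrative sketch, hypothetical names, not part of this __init__ ---
# The try/except dance above is the standard optional-dependency guard: probe
# availability inside try, swallow the sentinel exception, and register the
# heavy symbols only in the else branch. The same shape with plain ImportError:
def _register_optional(import_structure, available):
    try:
        if not available:
            raise ImportError("optional backend missing")
    except ImportError:
        pass  # leave the import structure untouched
    else:
        import_structure["modeling_demo"] = ["DemoModel"]
    return import_structure

assert "modeling_demo" in _register_optional({}, available=True)
assert "modeling_demo" not in _register_optional({}, available=False)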
| 343 | 1 |
import os
def lowerCAmelCase_ ( ):
with open(os.path.dirname(__file__ ) + """/p022_names.txt""" ) as file:
_A : Optional[int] = str(file.readlines()[0] )
_A : Optional[Any] = names.replace("""\"""","""""" ).split(""",""" )
names.sort()
_A : List[Any] = 0
_A : Union[str, Any] = 0
for i, name in enumerate(snake_case_ ):
for letter in name:
name_score += ord(snake_case_ ) - 64
total_score += (i + 1) * name_score
_A : Union[str, Any] = 0
return total_score
if __name__ == "__main__":
print(solution())
| 343 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase :
_a = 42
# setable values
_a = 42
_a = 42
_a = None
@classmethod
def a__ ( cls , _a , _a , _a ) -> Tuple:
return cls(common=_a , init_noise_sigma=_a , timesteps=_a )
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in FlaxKarrasDiffusionSchedulers]
_a = 42
@property
def a__ ( self ) -> Dict:
return True
@register_to_config
def __init__( self , _a = 1000 , _a = 0.0001 , _a = 0.02 , _a = "linear" , _a = None , _a = "fixed_small" , _a = True , _a = "epsilon" , _a = jnp.floataa , ) -> Tuple:
_A : Tuple = dtype
def a__ ( self , _a = None ) -> DDPMSchedulerState:
if common is None:
_A : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
_A : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_a , init_noise_sigma=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a = None ) -> jnp.ndarray:
return sample
def a__ ( self , _a , _a , _a = () ) -> DDPMSchedulerState:
_A : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_A : Dict = (jnp.arange(0 , _a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a=None , _a=None ) -> Optional[int]:
_A : Optional[Any] = state.common.alphas_cumprod[t]
_A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_A : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_A : Optional[Any] = jnp.clip(_a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_A : Any = jnp.log(jnp.clip(_a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_A : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A : str = variance
_A : Union[str, Any] = state.common.betas[t]
_A : Tuple = (predicted_variance + 1) / 2
_A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self , _a , _a , _a , _a , _a = None , _a = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A : Dict = timestep
if key is None:
_A : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_A , _A : List[str] = jnp.split(_a , sample.shape[1] , axis=1 )
else:
_A : int = None
# 1. compute alphas, betas
_A : int = state.common.alphas_cumprod[t]
_A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A : Union[str, Any] = 1 - alpha_prod_t
_A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A : Union[str, Any] = jnp.clip(_a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_A : Tuple = jax.random.split(_a , num=1 )
_A : Dict = jax.random.normal(_a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_a , _a , predicted_variance=_a ) ** 0.5) * noise
_A : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_a , state=_a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return add_noise_common(state.common , _a , _a , _a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return get_velocity_common(state.common , _a , _a , _a )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
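# --- illustrative sketch with numpy stand-ins, not part of the scheduler ---
# Two quick numeric checks of the formulas used above: set_timesteps spaces
# inference steps as round(arange(n) * (num_train_timesteps // n)) reversed,
# and the "epsilon" branch recovers x0 as (x_t - sqrt(1 - a_bar) * eps) / sqrt(a_bar).
import numpy as np

train_steps, n = 1000, 10
ts = (np.arange(0, n) * (train_steps // n)).round()[::-1]
assert ts[0] == 900 and ts[-1] == 0  # descending: 900, 800, ..., 0

alpha_bar, x0, eps = 0.25, 2.0, -1.0
x_t = np.sqrt(alpha_bar) * x0 + np.sqrt(1 - alpha_bar) * eps  # forward process
x0_hat = (x_t - np.sqrt(1 - alpha_bar) * eps) / np.sqrt(alpha_bar)
assert np.isclose(x0_hat, x0)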
| 343 | 1 |
import argparse
import struct
import unittest
class lowercase :
def __init__( self , _a ) -> None:
_A : Optional[int] = data
# Initialize hash values
_A : int = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
_A : str = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
_A : int = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def a__ ( _a ) -> bytes:
_A : int = B"""\x80""" + (B"""\x00""" * (63 - (len(_a ) + 8) % 64))
_A : str = struct.pack(""">Q""" , (len(_a ) * 8) )
return data + padding + big_endian_integer
def a__ ( self ) -> None:
# Convert into blocks of 64 bytes
_A : Any = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_A : Tuple = list(struct.unpack(""">16L""" , _a ) )
# add 48 0-ed integers
words += [0] * 48
_A , _A , _A , _A , _A , _A , _A , _A : List[str] = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_A : Optional[Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
_A : Union[str, Any] = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
_A : Optional[Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
_A : int = self.ror(_a , 6 ) ^ self.ror(_a , 11 ) ^ self.ror(_a , 25 )
_A : Dict = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
_A : Any = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
_A : Union[str, Any] = self.ror(_a , 2 ) ^ self.ror(_a , 13 ) ^ self.ror(_a , 22 )
_A : List[str] = (a & b) ^ (a & c) ^ (b & c)
_A : Optional[Any] = (sa + maj) % 0x1_00_00_00_00
_A , _A , _A , _A , _A , _A , _A , _A : str = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
_A : List[str] = [a, b, c, d, e, f, g, h]
# Modify final values
_A : Union[str, Any] = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
_A : int = """""".join([hex(_a )[2:].zfill(8 ) for value in self.hashes] )
def a__ ( self , _a , _a ) -> int:
return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> None:
import hashlib
_A : Dict = bytes("""Test String""" , """utf-8""" )
self.assertEqual(SHAaaa(_a ).hash , hashlib.shaaaa(_a ).hexdigest() )
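# --- illustrative sketch, not part of the hash implementation ---
# Two properties the class above relies on: ror is a 32-bit right rotation,
# and preprocessing pads every message to a whole number of 64-byte blocks
# (one 0x80 byte, zero fill, then the 8-byte big-endian bit length).
def _rotr32(value, rotations):
    return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)

assert _rotr32(0x00_00_00_01, 1) == 0x80_00_00_00  # the low bit wraps to the top

for size in (0, 1, 55, 56, 63, 64, 100):
    padded = size + 1 + (63 - (size + 8) % 64) + 8
    assert padded % 64 == 0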
def lowerCAmelCase_ ( ):
import doctest
doctest.testmod()
_A : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""-s""","""--string""",dest="""input_string""",default="""Hello World!! Welcome to Cryptography""",help="""Hash the string""",)
parser.add_argument(
"""-f""","""--file""",dest="""input_file""",help="""Hash contents of a file""" )
_A : Optional[Any] = parser.parse_args()
_A : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file,"""rb""" ) as f:
_A : Tuple = f.read()
else:
_A : Union[str, Any] = bytes(snake_case_,"""utf-8""" )
print(SHAaaa(snake_case_ ).hash )
if __name__ == "__main__":
main()
| 343 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=0 ):
# Format the message.
if name is None:
_A : Union[str, Any] = None
else:
_A : Dict = """.""" * max(0,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
_A : Tuple = fmt.format(snake_case_ )
# Print and recurse (if needed).
if isinstance(snake_case_,snake_case_ ):
if msg is not None:
print(snake_case_ )
for k in val.keys():
recursive_print(snake_case_,val[k],spaces + 2 )
elif isinstance(snake_case_,torch.Tensor ):
print(snake_case_,""":""",val.size() )
else:
print(snake_case_,""":""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
_A : str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_A : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_A : Tuple = param.view(*snake_case_ )
_A : Any = param.transpose(0,2 )
_A : int = param.transpose(1,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_A : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_A : int = param.view(*snake_case_ )
_A : Any = param.transpose(0,1 ).contiguous()
_A : Optional[int] = param.view(*snake_case_ )
return param
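# --- illustrative sketch with hypothetical sizes, not part of the converter ---
# For checkpoint_version >= 2.0 the function above reinterprets the fused QKV
# tensor from [num_heads * num_splits * hidden, ...] row order to
# [num_splits * num_heads * hidden, ...] by swapping the first two view axes:
_heads, _splits, _hidden = 2, 3, 4
_param = torch.arange(_heads * _splits * _hidden * 5).reshape(_heads * _splits * _hidden, 5)
_permuted = (
    _param.view(_heads, _splits, _hidden, 5)
    .transpose(0, 1)
    .contiguous()
    .view(_heads * _splits * _hidden, 5)
)
assert _permuted.shape == _param.shape  # same storage size, reordered rows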
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# The converted output model.
_A : Any = {}
# old versions did not store training args
_A : str = input_state_dict.get("""args""",snake_case_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.floataa ) ).view(
1,1,snake_case_,snake_case_ )
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
_A : List[str] = torch.tensor(-1e4,dtype=torch.floataa )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
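# --- illustrative sketch, not part of the converter ---
# The layer regex above splits a Megatron parameter name into
# (layer index, operation name, weight-or-bias). Quick demonstration:
_demo_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
_m = _demo_re.match("layers.7.self_attention.query_key_value.weight")
assert _m is not None
assert int(_m.group(1)) == 7
assert _m.group(2) == "self_attention.query_key_value"
assert _m.group(3) == "weight"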
def lowerCAmelCase_ ( ):
# Create the argument parser.
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""",action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""",type=snake_case_,help="""Path to the checkpoint file (.zip archive or direct .pt file)""",)
parser.add_argument(
"""--config_file""",default="""""",type=snake_case_,help="""An optional config json file describing the pre-trained model.""",)
_A : Optional[int] = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_A : Optional[Any] = convert_megatron_checkpoint(snake_case_,snake_case_,snake_case_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(snake_case_,snake_case_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_A : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_A : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_A : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_A : Optional[Any] = """gpt2"""
_A : List[str] = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = type(snake_case_ ).__name__
_A : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(snake_case_ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(snake_case_ )
# Store the state_dict to file.
_A : Union[str, Any] = os.path.join(snake_case_,"""pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(snake_case_,snake_case_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 343 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER","False" ) ) is not True,reason="Skipping test because should only be run when releasing minor transformers version",)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=_a , )
assert hasattr(self , """env""" )
def a__ ( self , _a ) -> Union[str, Any]:
# configuration for running training on smdistributed Model Parallel
_A : List[str] = {
"""enabled""": True,
"""processes_per_host""": 8,
}
_A : int = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
_A : Union[str, Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
_A : List[Any] = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=_a , py_version="""py36""" , )
def a__ ( self , _a ) -> Dict:
TrainingJobAnalytics(_a ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def a__ ( self , _a ) -> str:
# create estimator
_A : str = self.create_estimator(_a )
# run training
estimator.fit()
# result dataframe
_A : str = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_A : List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
_A : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _a )
| 343 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(UpperCamelCase__ )
class lowercase :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
F'''There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.''' )
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
_A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
_A : Tuple = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_A : Tuple = sorted(_a , key=lambda _a : x[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ["input_ids", "attention_mask"]
| 343 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = ShapEPipeline
_a = ["prompt"]
_a = ["prompt"]
_a = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> Optional[int]:
return 32
@property
def a__ ( self ) -> Optional[int]:
return 32
@property
def a__ ( self ) -> Optional[int]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> Optional[int]:
return 8
@property
def a__ ( self ) -> str:
_A : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def a__ ( self ) -> Any:
torch.manual_seed(0 )
_A : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_a )
@property
def a__ ( self ) -> Dict:
torch.manual_seed(0 )
_A : Union[str, Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_A : str = PriorTransformer(**_a )
return model
@property
def a__ ( self ) -> List[str]:
torch.manual_seed(0 )
_A : str = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_A : Tuple = ShapERenderer(**_a )
return model
def a__ ( self ) -> Union[str, Any]:
_A : Optional[Any] = self.dummy_prior
_A : Dict = self.dummy_text_encoder
_A : Union[str, Any] = self.dummy_tokenizer
_A : Optional[Any] = self.dummy_renderer
_A : Optional[int] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=_a , clip_sample=_a , clip_sample_range=1.0 , )
_A : Tuple = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def a__ ( self , _a , _a=0 ) -> Optional[Any]:
if str(_a ).startswith("""mps""" ):
_A : Dict = torch.manual_seed(_a )
else:
_A : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
_A : Tuple = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Optional[int]:
_A : List[str] = """cpu"""
_A : Tuple = self.get_dummy_components()
_A : List[Any] = self.pipeline_class(**_a )
_A : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Any = output.images[0]
_A : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_A : int = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self ) -> Optional[int]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self ) -> int:
_A : Union[str, Any] = torch_device == """cpu"""
_A : Union[str, Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_a , relax_max_difference=_a , )
def a__ ( self ) -> Optional[int]:
_A : Optional[int] = self.get_dummy_components()
_A : Tuple = self.pipeline_class(**_a )
_A : List[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Dict = 1
_A : List[str] = 2
_A : Tuple = self.get_dummy_inputs(_a )
for key in inputs.keys():
if key in self.batch_params:
_A : Optional[int] = batch_size * [inputs[key]]
_A : Optional[int] = pipe(**_a , num_images_per_prompt=_a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> Dict:
_A : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_A : Tuple = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_A : str = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device=_a ).manual_seed(0 )
_A : List[str] = pipe(
"""a shark""" , generator=_a , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_a , _a )
| 343 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ ( self ) -> List[Any]:
_A : int = ort.SessionOptions()
_A : Any = False
return options
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_A : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_A : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_A : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = """A red cat sitting on a park bench"""
_A : Optional[Any] = np.random.RandomState(0 )
_A : Dict = pipe(
prompt=_a , image=_a , mask_image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_a , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 343 | 1 |
def lowerCAmelCase_ ( snake_case_ ):
if not isinstance(snake_case_,snake_case_ ):
raise TypeError("""Input value must be an 'int' type""" )
_A : int = 0
while number:
position += 1
number >>= 1
return position
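# --- illustrative sketch, not part of the function above ---
# The loop counts how many right shifts empty the number, i.e. the index of
# the most significant set bit plus one -- exactly int.bit_length():
for _n in (1, 2, 17, 255, 256):
    _shifts, _x = 0, _n
    while _x:
        _shifts += 1
        _x >>= 1
    assert _shifts == _n.bit_length()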
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ ):
create_state_space_tree(snake_case_,[],0,[0 for i in range(len(snake_case_ ) )] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,):
if index == len(snake_case_ ):
print(snake_case_ )
return
for i in range(len(snake_case_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_A : Optional[Any] = True
create_state_space_tree(snake_case_,snake_case_,index + 1,snake_case_ )
current_sequence.pop()
_A : str = False
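# --- illustrative sketch, not part of the generator above ---
# The backtracking marks index_used[i], recurses, then unmarks, so every
# ordering of n distinct elements is visited exactly once (n! in total). A
# collecting variant cross-checked against itertools:
from itertools import permutations as _it_permutations

def _collect_permutations(sequence):
    out, current, used = [], [], [False] * len(sequence)

    def backtrack():
        if len(current) == len(sequence):
            out.append(tuple(current))
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack()
                current.pop()
                used[i] = False

    backtrack()
    return out

assert sorted(_collect_permutations([3, 1, 2])) == sorted(_it_permutations([3, 1, 2]))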
_snake_case = [3, 1, 2, 4]
generate_all_permutations(sequence)
_snake_case = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 343 | 1 |
def lowerCAmelCase_ ( snake_case_,snake_case_ = False ):
if not isinstance(snake_case_,snake_case_ ):
_A : Optional[Any] = f'''Expected string as input, found {type(snake_case_ )}'''
raise ValueError(snake_case_ )
if not isinstance(snake_case_,snake_case_ ):
_A : List[Any] = f'''Expected boolean as use_pascal parameter, found {type(snake_case_ )}'''
raise ValueError(snake_case_ )
_A : Any = input_str.split("""_""" )
_A : Optional[int] = 0 if use_pascal else 1
_A : List[Any] = words[start_index:]
_A : List[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
_A : List[str] = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 343 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = filter(lambda snake_case_ : p.requires_grad,model.parameters() )
_A : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if metric == "rouge2":
_A : Optional[int] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_A : Dict = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_A : List[str] = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this'''
""" function.""" )
_A : Optional[int] = ModelCheckpoint(
dirpath=snake_case_,filename=snake_case_,monitor=f'''val_{metric}''',mode="""max""",save_top_k=3,every_n_epochs=1,)
return checkpoint_callback
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return EarlyStopping(
monitor=f'''val_{metric}''',mode="""min""" if """loss""" in metric else """max""",patience=snake_case_,verbose=snake_case_,)
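# Minimal usage sketch (hedged: assumes a standard PyTorch Lightning setup and that
# the two factory functions above keep their original names; neither assumption is
# verified by this module):
#   trainer = pl.Trainer(
#       callbacks=[
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ]
#   )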
class lowercase ( pl.Callback ):
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[Any] = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_a )
@rank_zero_only
def a__ ( self , _a , _a , _a , _a=True ) -> None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
_A : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_A : Dict = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A : List[Any] = od / """test_results.txt"""
_A : List[Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A : Optional[int] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
_A : int = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_a )
generations_file.parent.mkdir(exist_ok=_a )
with open(_a , """a+""" ) as writer:
for key in sorted(_a ):
if key in ["log", "progress_bar", "preds"]:
continue
_A : List[Any] = metrics[key]
if isinstance(_a , torch.Tensor ):
_A : str = val.item()
_A : str = F'''{key}: {val:.6f}\n'''
writer.write(_a )
if not save_generations:
return
if "preds" in metrics:
_A : List[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_a )
@rank_zero_only
def a__ ( self , _a , _a ) -> str:
try:
_A : int = pl_module.model.model.num_parameters()
except AttributeError:
_A : str = pl_module.model.num_parameters()
_A : Optional[int] = count_trainable_parameters(_a )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self , _a , _a ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_a , _a , """test""" )
@rank_zero_only
def a__ ( self , _a , _a ) -> Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 343 | 1 |
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = len(snake_case_ )
for i in range(length - 1 ):
_A : List[str] = i
for k in range(i + 1,snake_case_ ):
if collection[k] < collection[least]:
_A : Dict = k
if least != i:
_A , _A : Optional[Any] = (collection[i], collection[least])
return collection
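# Worked example: selection_sort([3, 1, 2, 4])
#   pass i=0 swaps in the minimum 1  -> [1, 3, 2, 4]
#   pass i=1 swaps in 2              -> [1, 2, 3, 4]
#   pass i=2 finds 3 already placed  -> [1, 2, 3, 4]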
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 343 |
from __future__ import annotations
from collections.abc import Callable
_snake_case = list[list[float | int]]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
_A : int
_A : int
_A : int
_A : int
_A : int
_A : float
for row in range(snake_case_ ):
for col in range(snake_case_ ):
_A : Dict = matrix[row][col]
_A : List[Any] = vector[row][0]
_A : List[Any] = 0
_A : Optional[Any] = 0
while row < size and col < size:
# pivoting
_A : Any = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_,snake_case_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_A , _A : Optional[Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1,snake_case_ ):
_A : str = augmented[rowa][col] / augmented[row][col]
_A : List[Any] = 0
for cola in range(col + 1,size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1,snake_case_ ):
for row in range(snake_case_ ):
_A : int = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_,size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row],10 )] for row in range(snake_case_ )
]
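# Illustrative check of the solver above (hedged: assumes the intended Gaussian
# elimination behaviour; `solve` is the name the caller below uses):
#   solve([[2.0, 1.0], [1.0, 3.0]], [[5.0], [10.0]]) -> [[1.0], [3.0]]
#   since 2x + y = 5 and x + 3y = 10 give x = 1, y = 3.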
def lowerCAmelCase_ ( snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
_A : Matrix = [[0] for _ in range(snake_case_ )]
_A : Matrix
_A : int
_A : int
_A : int
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
_A : str = (x_val + 1) ** (size - col - 1)
_A : List[str] = y_val
_A : Any = solve(snake_case_,snake_case_ )
def interpolated_func(snake_case_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
def lowerCAmelCase_ ( snake_case_ ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCAmelCase_ ( snake_case_ = question_function,snake_case_ = 10 ):
_A : list[int] = [func(snake_case_ ) for x_val in range(1,order + 1 )]
_A : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1,order + 1 )
]
_A : int = 0
_A : Callable[[int], int]
_A : int
for poly in polynomials:
_A : Optional[int] = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
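# Project Euler 101: each degree-(k-1) interpolant fits the first k points of the
# generating function exactly; the loop above finds the smallest argument where the
# fit first disagrees and sums those first incorrect terms across all fits.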
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 343 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
_snake_case = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowerCAmelCase_ ( snake_case_ = "mumbai" ):
_A : Optional[Any] = BeautifulSoup(requests.get(url + location ).content,"""html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""",attrs={"""data-tn-component""": """organicJob"""} ):
_A : Tuple = job.find("""a""",attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
_A : Optional[int] = job.find("""span""",{"""class""": """company"""} ).text.strip()
yield job_title, company_name
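# Note: this parses server-rendered HTML, so the CSS hooks used above
# ("organicJob", "jobTitle", "company") reflect Indeed's historical markup
# and may stop matching if the page layout changes.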
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 343 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowercase :
pass
| 343 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(snake_case_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_A : List[Any] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
_A : Tuple = [[0.0, 0.0], [0.0, 0.0]]
_A , _A : List[str] = matrix[1][1], matrix[0][0]
_A , _A : List[str] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(snake_case_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(snake_case_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_A : List[str] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
_A : List[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_A : Union[str, Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_A : Optional[Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_A : List[Any] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_A : int = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_A : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_A : List[str] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_A : Optional[int] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_A : List[Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
_A : List[str] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_A : Union[str, Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(snake_case_ )
# Calculate the inverse of the matrix
return [[float(d(snake_case_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 343 | 1 |
from __future__ import annotations
import pandas as pd
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = [0] * no_of_processes
_A : List[Any] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(snake_case_ ):
_A : Optional[Any] = burst_time[i]
_A : Tuple = 0
_A : Union[str, Any] = 0
_A : str = 999999999
_A : List[Any] = 0
_A : int = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(snake_case_ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
_A : Union[str, Any] = remaining_time[j]
_A : Any = j
_A : Any = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
_A : Dict = remaining_time[short]
if minm == 0:
_A : str = 999999999
if remaining_time[short] == 0:
complete += 1
_A : List[Any] = False
# Find finish time of current process
_A : List[Any] = increment_time + 1
# Calculate waiting time
_A : Optional[int] = finish_time - arrival_time[short]
_A : Optional[int] = finar - burst_time[short]
if waiting_time[short] < 0:
_A : int = 0
# Increment time
increment_time += 1
return waiting_time
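# Example (hedged, illustrating the intended SRTF behaviour): arrival times [0, 1]
# with burst times [4, 2] schedule as P1 (1 unit), P2 (2 units), P1 (3 units),
# giving waiting times [2, 0].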
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = [0] * no_of_processes
for i in range(snake_case_ ):
_A : List[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = 0
_A : Union[str, Any] = 0
for i in range(snake_case_ ):
_A : int = total_waiting_time + waiting_time[i]
_A : Optional[Any] = total_turn_around_time + turn_around_time[i]
print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("""Average turn around time =""",total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("Enter how many process you want to analyze")
_snake_case = int(input())
_snake_case = [0] * no_of_processes
_snake_case = [0] * no_of_processes
_snake_case = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
_snake_case , _snake_case = map(int, input().split())
_snake_case = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_snake_case = burst_time
_snake_case = no_of_processes
_snake_case = waiting_time
_snake_case = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_snake_case = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
| 343 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
@register_to_config
def __init__( self , _a = 32 , _a = 64 , _a = 20 , _a = 768 , _a=77 , _a=4 , _a = 0.0 , _a = "silu" , _a = None , _a = None , _a = "linear" , _a = "prd" , _a = None , _a = None , _a = None , ) -> Any:
super().__init__()
_A : int = num_attention_heads
_A : Union[str, Any] = attention_head_dim
_A : Tuple = num_attention_heads * attention_head_dim
_A : Any = additional_embeddings
_A : Any = time_embed_dim or inner_dim
_A : List[str] = embedding_proj_dim or embedding_dim
_A : Optional[int] = clip_embed_dim or embedding_dim
_A : Union[str, Any] = Timesteps(_a , _a , 0 )
_A : str = TimestepEmbedding(_a , _a , out_dim=_a , act_fn=_a )
_A : Dict = nn.Linear(_a , _a )
if embedding_proj_norm_type is None:
_A : int = None
elif embedding_proj_norm_type == "layer":
_A : Optional[Any] = nn.LayerNorm(_a )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
_A : Optional[Any] = nn.Linear(_a , _a )
if encoder_hid_proj_type is None:
_A : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
_A : Tuple = nn.Linear(_a , _a )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
_A : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _a ) )
if added_emb_type == "prd":
_A : str = nn.Parameter(torch.zeros(1 , 1 , _a ) )
elif added_emb_type is None:
_A : Union[str, Any] = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
_A : int = nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , activation_fn="""gelu""" , attention_bias=_a , )
for d in range(_a )
] )
if norm_in_type == "layer":
_A : Union[str, Any] = nn.LayerNorm(_a )
elif norm_in_type is None:
_A : Tuple = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
_A : int = nn.LayerNorm(_a )
_A : str = nn.Linear(_a , _a )
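# Additive causal attention mask: fill with -10000, then keep only the strictly
# upper triangle, so after softmax each position attends to itself and earlier
# positions only.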
_A : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
_A : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _a , persistent=_a )
_A : Tuple = nn.Parameter(torch.zeros(1 , _a ) )
_A : Dict = nn.Parameter(torch.zeros(1 , _a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
_A : List[str] = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
_A : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def a__ ( self , _a ) -> List[str]:
_A : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def a__ ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
def a__ ( self , _a , _a , _a , _a = None , _a = None , _a = True , ) -> Optional[Any]:
_A : Tuple = hidden_states.shape[0]
_A : List[Any] = timestep
if not torch.is_tensor(_a ):
_A : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
_A : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_A : Optional[int] = timesteps * torch.ones(_a , dtype=timesteps.dtype , device=timesteps.device )
_A : Dict = self.time_proj(_a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_A : Tuple = timesteps_projected.to(dtype=self.dtype )
_A : List[Any] = self.time_embedding(_a )
if self.embedding_proj_norm is not None:
_A : Dict = self.embedding_proj_norm(_a )
_A : List[Any] = self.embedding_proj(_a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_A : List[Any] = self.encoder_hidden_states_proj(_a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
_A : Optional[int] = self.proj_in(_a )
_A : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
_A : Union[str, Any] = []
_A : List[str] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_A : List[str] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_A : List[str] = hidden_states[:, None, :]
_A : Dict = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_A : Optional[int] = self.prd_embedding.to(hidden_states.dtype ).expand(_a , -1 , -1 )
additional_embeds.append(_a )
_A : str = torch.cat(
_a , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_A : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_A : Union[str, Any] = F.pad(
_a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_A : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_A : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
_A : List[Any] = F.pad(_a , (0, self.additional_embeddings) , value=0.0 )
_A : Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_A : int = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_A : str = self.norm_in(_a )
for block in self.transformer_blocks:
_A : List[Any] = block(_a , attention_mask=_a )
_A : Any = self.norm_out(_a )
if self.prd_embedding is not None:
_A : int = hidden_states[:, -1]
else:
_A : Any = hidden_states[:, additional_embeddings_len:]
_A : Union[str, Any] = self.proj_to_clip_embeddings(_a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_a )
def a__ ( self , _a ) -> Tuple:
_A : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 343 | 1 |
from __future__ import annotations
from typing import Any
class lowercase ( UpperCamelCase__ ):
pass
class lowercase :
def __init__( self , _a ) -> None:
_A : Any = data
_A : Node | None = None
def __iter__( self ) -> Union[str, Any]:
_A : Dict = self
_A : Union[str, Any] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(_a )
yield node.data
_A : List[Any] = node.next_node
@property
def a__ ( self ) -> bool:
try:
list(self )
return False
except ContainsLoopError:
return True
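# Note: the membership test above costs O(n) per visited node (O(n^2) overall,
# with O(n) extra memory). Floyd's tortoise-and-hare cycle detection would give
# the same answer in O(n) time and O(1) extra space.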
if __name__ == "__main__":
_snake_case = Node(1)
_snake_case = Node(2)
_snake_case = Node(3)
_snake_case = Node(4)
print(root_node.has_loop) # False
_snake_case = root_node.next_node
print(root_node.has_loop) # True
_snake_case = Node(5)
_snake_case = Node(6)
_snake_case = Node(5)
_snake_case = Node(6)
print(root_node.has_loop) # False
_snake_case = Node(1)
print(root_node.has_loop) # False
| 343 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Any = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A : Union[str, Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[str] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : int = None
if token is not None:
_A : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    _A : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
_A : Optional[Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[Any] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Dict = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Tuple = requests.get(snake_case_,headers=snake_case_,allow_redirects=snake_case_ )
_A : Tuple = result.headers["""Location"""]
_A : Union[str, Any] = requests.get(snake_case_,allow_redirects=snake_case_ )
_A : Dict = os.path.join(snake_case_,f'''{artifact_name}.zip''' )
with open(snake_case_,"""wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : List[str] = []
_A : int = []
_A : Tuple = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
_A : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A : Dict = line[: line.index(""": """ )]
_A : Dict = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_A : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
_A : Optional[int] = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
f'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
_A : Any = None
if job_name and job_links:
_A : Dict = job_links.get(snake_case_,snake_case_ )
# A list with elements of the form (line of error, error, failed test)
_A : Optional[int] = [x + [y] + [job_link] for x, y in zip(snake_case_,snake_case_ )]
return result
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = []
_A : Optional[int] = [os.path.join(snake_case_,snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_,job_links=snake_case_ ) )
return errors
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = Counter()
counter.update([x[1] for x in logs] )
_A : Tuple = counter.most_common()
_A : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A : str = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
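# The reducer above groups failures by error message: each entry records how often
# the error occurred and which (test, error line) pairs produced it, with the
# resulting dict sorted by descending count.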
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_A : Dict = test.split("""/""" )[2]
else:
_A : str = None
return test
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A : Union[str, Any] = [x for x in logs if x[2] is not None]
_A : Optional[Any] = {x[2] for x in logs}
_A : List[Any] = {}
for test in tests:
_A : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A : Union[str, Any] = counter.most_common()
_A : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A : str = sum(error_counts.values() )
if n_errors > 0:
_A : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = """| no. | error | status |"""
_A : List[Any] = """|-:|:-|:-|"""
_A : List[Any] = [header, sep]
for error in reduced_by_error:
_A : List[str] = reduced_by_error[error]["""count"""]
_A : List[Any] = f'''| {count} | {error[:100]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = """| model | no. of errors | major error | count |"""
_A : Optional[Any] = """|-:|-:|-:|-:|"""
_A : Union[str, Any] = [header, sep]
for model in reduced_by_model:
_A : Dict = reduced_by_model[model]["""count"""]
_A , _A : str = list(reduced_by_model[model]["""errors"""].items() )[0]
_A : Union[str, Any] = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_snake_case = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case = get_job_links(args.workflow_run_id, token=args.token)
_snake_case = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case = k.find(" / ")
_snake_case = k[index + len(" / ") :]
_snake_case = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case = reduce_by_error(errors)
_snake_case = reduce_by_model(errors)
_snake_case = make_github_table(reduced_by_error)
_snake_case = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 343 | 1 |
def lowerCAmelCase_ ( snake_case_ ):
if not isinstance(snake_case_,snake_case_ ):
_A : List[str] = f'''Input value of [number={number}] must be an integer'''
raise TypeError(snake_case_ )
if number < 1:
_A : Dict = f'''Input value of [number={number}] must be > 0'''
raise ValueError(snake_case_ )
_A : List[str] = 1
for i in range(1,snake_case_ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
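# The loop applies the Catalan recurrence C(i) = C(i-1) * (4i - 2) / (i + 1), so
# for input n this yields the (n-1)-th Catalan number: f(1)=1, f(2)=1, f(3)=2,
# f(4)=5, f(5)=14.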
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
debug_launcher(test_script.main )
def a__ ( self ) -> Any:
debug_launcher(test_ops.main )
| 343 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = None ):
if start is None:
_A : List[str] = 0
if end is None:
_A : List[str] = len(snake_case_ ) - 1
if start >= end:
return
_A : Optional[Any] = (start + end) // 2
slowsort(snake_case_,snake_case_,snake_case_ )
slowsort(snake_case_,mid + 1,snake_case_ )
if sequence[end] < sequence[mid]:
_A , _A : str = sequence[mid], sequence[end]
slowsort(snake_case_,snake_case_,end - 1 )
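# Slowsort is a deliberately pessimal "multiply and surrender" algorithm: sort both
# halves recursively, move the larger of the two candidates to the end, then
# re-sort everything except that maximum. slowsort([3, 1, 2]) sorts the list in
# place to [1, 2, 3].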
if __name__ == "__main__":
from doctest import testmod
testmod()
| 343 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = "resnet"
_a = ["basic", "bottleneck"]
def __init__( self , _a=3 , _a=64 , _a=[256, 512, 1024, 2048] , _a=[3, 4, 6, 3] , _a="bottleneck" , _a="relu" , _a=False , _a=None , _a=None , **_a , ) -> int:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
_A : Optional[Any] = num_channels
_A : List[Any] = embedding_size
_A : int = hidden_sizes
_A : Union[str, Any] = depths
_A : Optional[int] = layer_type
_A : Any = hidden_act
_A : List[Any] = downsample_in_first_stage
_A : int = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
_A , _A : str = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class lowercase ( UpperCamelCase__ ):
_a = version.parse("1.11" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self ) -> float:
return 1e-3
| 343 | 1 |
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = len(snake_case_ )
_A : str = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_A : Optional[int] = True
# sum is not zero and set is empty then false
for i in range(1,required_sum + 1 ):
_A : Optional[int] = False
for i in range(1,arr_len + 1 ):
for j in range(1,required_sum + 1 ):
if arr[i - 1] > j:
_A : List[str] = subset[i - 1][j]
if arr[i - 1] <= j:
_A : Optional[int] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
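# Illustrative usage of the classic subset-sum DP above:
#   arr = [3, 34, 4, 12, 5, 2] with required_sum 9 yields True (4 + 5 = 9),
#   while required_sum 30 yields False (no subset of arr sums to 30).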
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# Load checkpoint
_A : Optional[int] = torch.load(snake_case_,map_location="""cpu""" )
_A : Any = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
_A : Any = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_A : Tuple = v
else:
_A : Dict = v
_A : Optional[Any] = chkpt["""params"""]
_A : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(snake_case_,(torch.FloatTensor, numpy.ndarray) )}
_A : str = chkpt["""dico_word2id"""]
_A : Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""","""""" ): i for s, i in vocab.items()}
# Save pytorch-model
_A : Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_A : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_A : Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(snake_case_,snake_case_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
print(f'''Save vocab file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 343 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig as Pix2StructConfig,
PixaStructForConditionalGeneration as Pix2StructForConditionalGeneration,
PixaStructImageProcessor as Pix2StructImageProcessor,
PixaStructProcessor as Pix2StructProcessor,
PixaStructTextConfig as Pix2StructTextConfig,
PixaStructVisionConfig as Pix2StructVisionConfig,
)
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = checkpoints.load_t5x_checkpoint(snake_case_ )
_A : Any = flatten_dict(snake_case_ )
return flax_params
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = {}
_A : Union[str, Any] = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
_A : str = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
_A : Union[str, Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
_A : Tuple = new_key.replace(snake_case_,snake_case_ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
_A : Union[str, Any] = new_key.replace(snake_case_,snake_case_ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
_A : Dict = re.sub(r"""layers_(\d+)""",r"""layer.\1""",snake_case_ )
_A : Union[str, Any] = new_key.replace("""encoder""","""encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
_A : Tuple = re.sub(r"""layers_(\d+)""",r"""layer.\1""",snake_case_ )
_A : Dict = flax_dict[key]
_A : List[Any] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
_A : Optional[int] = torch.from_numpy(converted_dict[key].T )
else:
_A : str = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=False,snake_case_=False ):
_A : List[str] = get_flax_param(snake_case_ )
if not use_large:
_A : List[Any] = Pix2StructVisionConfig()
_A : str = Pix2StructTextConfig()
else:
_A : str = Pix2StructVisionConfig(
hidden_size=1536,d_ff=3968,num_attention_heads=24,num_hidden_layers=18 )
_A : Tuple = Pix2StructTextConfig(hidden_size=1536,d_ff=3968,num_heads=24,num_layers=18 )
_A : Any = Pix2StructConfig(
vision_config=encoder_config.to_dict(),text_config=decoder_config.to_dict(),is_vqa=snake_case_ )
_A : Optional[Any] = Pix2StructForConditionalGeneration(snake_case_ )
_A : Dict = rename_and_convert_flax_params(snake_case_ )
model.load_state_dict(snake_case_ )
_A : int = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
_A : List[str] = Pix2StructImageProcessor()
_A : int = Pix2StructProcessor(image_processor=snake_case_,tokenizer=snake_case_ )
if use_large:
_A : Dict = 4096
_A : List[str] = True
# mkdir if needed
os.makedirs(snake_case_,exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
print("""Model saved in {}""".format(snake_case_ ) )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
_snake_case = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 343 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( UpperCamelCase__ ):
_a = ["image_processor", "tokenizer"]
_a = "BlipImageProcessor"
_a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a , _a ) -> Any:
_A : List[Any] = False
super().__init__(_a , _a )
_A : Optional[int] = self.image_processor
def __call__( self , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_A : Dict = self.tokenizer
_A : Dict = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
# add pixel_values
_A : int = self.image_processor(_a , return_tensors=_a )
if text is not None:
_A : List[Any] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
else:
_A : int = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def a__ ( self , *_a , **_a ) -> Any:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> Optional[Any]:
_A : Any = self.tokenizer.model_input_names
_A : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 343 | 1 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_snake_case = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class lowercase :
def __init__( self , _a = 14 ) -> None:
if group not in primes:
raise ValueError("""Unsupported Group""" )
_A : List[str] = primes[group]["""prime"""]
_A : Optional[int] = primes[group]["""generator"""]
_A : Any = int(hexlify(urandom(32 ) ) , base=16 )
def a__ ( self ) -> str:
return hex(self.__private_key )[2:]
def a__ ( self ) -> str:
_A : Optional[int] = pow(self.generator , self.__private_key , self.prime )
return hex(_a )[2:]
def a__ ( self , _a ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(_a , (self.prime - 1) // 2 , self.prime ) == 1
)
def a__ ( self , _a ) -> str:
_A : Union[str, Any] = int(_a , base=16 )
if not self.is_valid_public_key(_a ):
raise ValueError("""Invalid public key""" )
_A : List[str] = pow(_a , self.__private_key , self.prime )
return sha256(str(_a ).encode() ).hexdigest()
@staticmethod
def a__ ( _a , _a ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(_a , (prime - 1) // 2 , _a ) == 1
)
@staticmethod
def a__ ( _a , _a , _a = 14 ) -> str:
_A : Any = int(_a , base=16 )
_A : Dict = int(_a , base=16 )
_A : Union[str, Any] = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(_a , _a ):
raise ValueError("""Invalid public key""" )
_A : str = pow(_a , _a , _a )
return sha256(str(_a ).encode() ).hexdigest()
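# Illustrative key exchange (hedged: the obfuscated `a__` methods above are assumed
# to keep their original roles of generate_public_key / generate_shared_key):
#   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b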
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = 0
if start < end:
_A : Tuple = randint(snake_case_,snake_case_ )
_A : Any = a[end]
_A : int = a[pivot]
_A : int = temp
_A , _A : List[Any] = _in_place_partition(snake_case_,snake_case_,snake_case_ )
count += _in_place_quick_sort(snake_case_,snake_case_,p - 1 )
count += _in_place_quick_sort(snake_case_,p + 1,snake_case_ )
return count
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = 0
_A : List[str] = randint(snake_case_,snake_case_ )
_A : Union[str, Any] = a[end]
_A : List[str] = a[pivot]
_A : List[Any] = temp
_A : List[str] = start - 1
for index in range(snake_case_,snake_case_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_A : Union[str, Any] = new_pivot_index + 1
_A : List[Any] = a[new_pivot_index]
_A : Optional[int] = a[index]
_A : List[Any] = temp
_A : Optional[Any] = a[new_pivot_index + 1]
_A : Any = a[end]
_A : Dict = temp
return new_pivot_index + 1, count
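# The helper above is a randomized Lomuto partition: it swaps a randomly chosen
# pivot to the end, sweeps once from `start` moving smaller elements forward, and
# returns the pivot's final index together with the number of comparisons made.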
_snake_case = TemporaryFile()
_snake_case = 100 # 1000 elements are to be sorted
_snake_case , _snake_case = 0, 1 # mean and standard deviation
_snake_case = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
_snake_case = np.load(outfile)
_snake_case = len(M) - 1
_snake_case = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 343 | 1 |
import numpy as np
class lowercase :
def __init__( self ) -> Optional[int]:
_A : List[str] = (0, 0)
_A : Union[str, Any] = None
_A : Optional[Any] = 0
_A : Any = 0
_A : str = 0
def __eq__( self , _a ) -> Tuple:
return self.position == cell.position
def a__ ( self ) -> Any:
print(self.position )
class lowercase :
def __init__( self , _a=(5, 5) ) -> int:
_A : Optional[Any] = np.zeros(_a )
_A : Union[str, Any] = world_size[0]
_A : str = world_size[1]
def a__ ( self ) -> str:
print(self.w )
def a__ ( self , _a ) -> Optional[int]:
_A : List[str] = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
_A : str = cell.position[0]
_A : str = cell.position[1]
_A : List[Any] = []
for n in neughbour_cord:
_A : str = current_x + n[0]
_A : Dict = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
_A : int = Cell()
_A : Union[str, Any] = (x, y)
_A : Optional[int] = cell
neighbours.append(_a )
return neighbours
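# The eight offsets above form a Moore neighbourhood; the bounds check filters
# off-grid candidates, so corner cells get 3 neighbours and edge cells get 5.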
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = []
_A : str = []
_open.append(snake_case_ )
while _open:
_A : List[str] = np.argmin([n.f for n in _open] )
_A : Optional[Any] = _open[min_f]
_closed.append(_open.pop(snake_case_ ) )
if current == goal:
break
for n in world.get_neigbours(snake_case_ ):
for c in _closed:
if c == n:
continue
_A : Optional[Any] = current.g + 1
            xa , ya = n.position
            xb , yb = goal.position
            _A : Optional[Any] = (yb - ya) ** 2 + (xb - xa) ** 2  # squared Euclidean heuristic h(n)
_A : Dict = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(snake_case_ )
_A : Union[str, Any] = []
while current.parent is not None:
path.append(current.position )
_A : str = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
_snake_case = Gridworld()
# Start position and goal
_snake_case = Cell()
_snake_case = (0, 0)
_snake_case = Cell()
_snake_case = (4, 4)
print(f"""path from {start.position} to {goal.position}""")
_snake_case = astar(world, start, goal)
# Just for visual reasons.
for i in s:
_snake_case = 1
print(world.w)
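# A self-contained A* sketch with an explicit priority queue and the
# squared-distance heuristic the code above intends; the grid size, start and
# goal values below are illustrative.
import heapq
import itertools
def astar_grid(size, start, goal):
    def h(p):   # squared Euclidean distance to the goal
        return (p[0] - goal[0]) ** 2 + (p[1] - goal[1]) ** 2
    tie = itertools.count()
    frontier = [(h(start), next(tie), 0, start, None)]
    parents, best_g = {}, {start: 0}
    while frontier:
        _, _, g, pos, parent = heapq.heappop(frontier)
        if pos in parents:
            continue
        parents[pos] = parent
        if pos == goal:
            break
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                nxt = (pos[0] + dx, pos[1] + dy)
                if nxt == pos or not (0 <= nxt[0] < size and 0 <= nxt[1] < size):
                    continue
                if g + 1 < best_g.get(nxt, float("inf")):
                    best_g[nxt] = g + 1
                    heapq.heappush(frontier, (g + 1 + h(nxt), next(tie), g + 1, nxt, pos))
    path, node = [], goal
    while node is not None:
        path.append(node)
        node = parents[node]
    return path[::-1]
print(astar_grid(5, (0, 0), (4, 4)))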
| 343 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "audio-spectrogram-transformer"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=16 , _a=True , _a=10 , _a=10 , _a=1024 , _a=128 , **_a , ) -> List[Any]:
super().__init__(**_a )
_A : Any = hidden_size
_A : Tuple = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Any = intermediate_size
_A : Optional[Any] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Optional[Any] = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : str = patch_size
_A : Tuple = qkv_bias
_A : Dict = frequency_stride
_A : Union[str, Any] = time_stride
_A : Any = max_length
_A : Tuple = num_mel_bins
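# How the strides above become a patch grid: AST slides a patch_size x patch_size
# window over a (max_length x num_mel_bins) spectrogram. Assuming the usual
# (dim - patch_size) // stride + 1 rule used by the model, the defaults yield
# a 101 x 12 (time x frequency) grid, i.e. 1212 patches before special tokens.
patch_size, frequency_stride, time_stride = 16, 10, 10
num_mel_bins, max_length = 128, 1024
freq_patches = (num_mel_bins - patch_size) // frequency_stride + 1   # 12
time_patches = (max_length - patch_size) // time_stride + 1         # 101
print(freq_patches * time_patches)                                  # 1212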
| 343 | 1 |
import torch
from diffusers import DiffusionPipeline
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a ) -> Union[str, Any]:
super().__init__()
self.register_modules(unet=_a , scheduler=_a )
def __call__( self ) -> Tuple:
_A : Dict = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
_A : int = 1
_A : Union[str, Any] = self.unet(_a , _a ).sample
_A : List[Any] = self.scheduler.step(_a , _a , _a ).prev_sample
_A : Union[str, Any] = scheduler_output - scheduler_output + torch.ones_like(_a )
return result
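# A cleaned-up, runnable restatement of the one-step pipeline above; the class
# name and the tiny UNet configuration are mine, chosen only so the sketch
# executes quickly on CPU.
import torch
from diffusers import DiffusionPipeline, UNet2DModel, DDPMScheduler
class OneStepPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self):
        noise = torch.randn(
            (1, self.unet.config.in_channels,
             self.unet.config.sample_size, self.unet.config.sample_size) )
        t = 1
        model_out = self.unet(noise, t).sample
        prev = self.scheduler.step(model_out, t, noise).prev_sample
        return prev - prev + torch.ones_like(prev)   # all-ones, as above
unet = UNet2DModel(sample_size=8, block_out_channels=(32, 64),
                   down_block_types=("DownBlock2D", "DownBlock2D"),
                   up_block_types=("UpBlock2D", "UpBlock2D"))
pipe = OneStepPipeline(unet, DDPMScheduler())
assert torch.all(pipe() == 1.0)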
| 343 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def lowerCAmelCase_ ( ):
_A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
_A : Optional[Any] = parser.parse_args()
return args.f
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> None:
_A : List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
def a__ ( self , _a ) -> Dict:
_A : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_a , """argv""" , _a ):
_A : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_a , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ) -> Optional[int]:
_A : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_a )
_A : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
_A : List[str] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
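# The argv-patching trick above in miniature: drive a script's main() in-process
# by swapping sys.argv instead of spawning a subprocess. The script name and
# flags below are placeholders.
import sys
from unittest.mock import patch
testargs = ["my_script.py", "--do_eval", "--seed", "42"]
with patch.object(sys, "argv", testargs):
    # my_script.main() would now parse these flags through argparse
    assert sys.argv[1] == "--do_eval"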
| 343 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Dict:
_A : List[Any] = 0
@slow
def a__ ( self ) -> List[str]:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
_A : Tuple = AutoTokenizer.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_a ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
_A : Union[str, Any] = AutoTokenizer.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_a ) , 0 )
def a__ ( self ) -> List[str]:
_A : str = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def a__ ( self ) -> Optional[int]:
_A : Optional[int] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def a__ ( self ) -> Dict:
_A : Optional[Any] = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
# Check that tokenizer_type ≠ model_type
_A : Tuple = AutoTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def a__ ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(_a , """vocab.txt""" ) )
_A : Optional[int] = AutoTokenizer.from_pretrained(_a , tokenizer_type="""bert""" , use_fast=_a )
self.assertIsInstance(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(_a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(_a , """merges.txt""" ) )
_A : List[str] = AutoTokenizer.from_pretrained(_a , tokenizer_type="""gpt2""" , use_fast=_a )
self.assertIsInstance(_a , _a )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(_a , """vocab.txt""" ) )
_A : Optional[Any] = AutoTokenizer.from_pretrained(_a , tokenizer_type="""bert""" )
self.assertIsInstance(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(_a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(_a , """merges.txt""" ) )
_A : str = AutoTokenizer.from_pretrained(_a , tokenizer_type="""gpt2""" )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> str:
with pytest.raises(_a ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def a__ ( self ) -> Dict:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_A : List[str] = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
if isinstance(_a , _a ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _a )
else:
self.assertEqual(tokenizer.do_lower_case , _a )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def a__ ( self ) -> Tuple:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_a , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
_A : Optional[Any] = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def a__ ( self ) -> Union[str, Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
_A : Tuple = TOKENIZER_MAPPING.values()
_A : Any = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_a )
@require_tokenizers
def a__ ( self ) -> Dict:
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=_a ) , _a )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , _a )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=_a )
_A : Dict = """Hello, world. How are you?"""
_A : List[Any] = tokenizer.tokenize(_a )
self.assertEqual("""[UNK]""" , tokens[0] )
_A : Tuple = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=_a )
_A : str = tokenizer.tokenize(_a )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def a__ ( self ) -> Union[str, Any]:
_A : Optional[Any] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(_a ) , _a )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def a__ ( self ) -> Any:
_A : Union[str, Any] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : List[str] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def a__ ( self ) -> Optional[Any]:
_A : Dict = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Check we can load the tokenizer config of an online model.
_A : List[str] = get_tokenizer_config("""bert-base-cased""" )
_A : Union[str, Any] = config.pop("""_commit_hash""" , _a )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_a , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_A : Dict = get_tokenizer_config(_a )
self.assertDictEqual(_a , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_A : Tuple = AutoTokenizer.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : int = get_tokenizer_config(_a )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def a__ ( self ) -> List[str]:
try:
AutoConfig.register("""custom""" , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
_A : Dict = CustomTokenizer.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : Any = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> List[Any]:
try:
AutoConfig.register("""custom""" , _a )
# Can register in two steps
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_a , slow_tokenizer_class=_a , fast_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
            # We pass through a fast BERT tokenizer because there is no slow-to-fast converter
            # for our new tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_A : int = BertTokenizerFast.from_pretrained(_a )
bert_tokenizer.save_pretrained(_a )
_A : List[Any] = CustomTokenizerFast.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : List[str] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_A : str = AutoTokenizer.from_pretrained(_a , use_fast=_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_A : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_A : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
_A : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : Tuple = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_A : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a , use_fast=_a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : Optional[Any] = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a , use_fast=_a )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def a__ ( self ) -> Union[str, Any]:
class lowercase ( UpperCamelCase__ ):
_a = False
class lowercase ( UpperCamelCase__ ):
_a = NewTokenizer
_a = False
try:
AutoConfig.register("""custom""" , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
# If remote code is not set, the default is to use local
_A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_A : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_A : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_A : List[str] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
_A : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Union[str, Any]:
_A : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_a )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_A : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_a , use_fast=_a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def a__ ( self ) -> str:
with self.assertRaisesRegex(
_a , """bert-base is not a local folder and is not a valid model identifier""" ):
_A : int = AutoTokenizer.from_pretrained("""bert-base""" )
def a__ ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
_a , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_A : int = AutoTokenizer.from_pretrained(_a , revision="""aaaaaa""" )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
_A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
_A : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
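# A hedged sketch of the registration flow the tests above exercise; MyConfig
# and MyTokenizer are illustrative stand-ins, not classes shipped by the library.
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer
class MyConfig(PretrainedConfig):
    model_type = "my-new-model"
class MyTokenizer(PreTrainedTokenizer):
    pass
AutoConfig.register("my-new-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# from_pretrained on a checkpoint whose config is MyConfig now resolves to
# MyTokenizer; the tests above undo this via the mappings' _extra_content dicts.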
| 343 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ) -> Union[str, Any]:
_A : Optional[int] = parent
_A : Dict = batch_size
_A : Any = image_size
_A : Optional[int] = patch_size
_A : Optional[int] = num_channels
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : str = initializer_range
_A : Tuple = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : str = num_patches + 1
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Union[str, Any] = self.type_sequence_label_size
_A : Tuple = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , labels=_a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Dict = 1
_A : str = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A : Dict = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Tuple = ViTMSNModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Any:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Tuple = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def a__ ( self ) -> str:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> List[Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Any:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
torch.manual_seed(2 )
_A : Tuple = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a )
_A : Tuple = self.default_image_processor
_A : Dict = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
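# The sequence-length bookkeeping the tester above relies on: ViT-style models
# produce (image_size // patch_size) ** 2 patch tokens plus one [CLS] token.
image_size, patch_size = 30, 2                   # the tester defaults above
num_patches = (image_size // patch_size) ** 2    # 225
seq_length = num_patches + 1                     # 226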
| 343 | 1 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> List[str]:
_A : Any = parent
_A : Dict = batch_size
_A : Optional[int] = seq_length
_A : int = is_training
_A : List[str] = use_attention_mask
_A : List[str] = use_token_type_ids
_A : Optional[Any] = use_labels
_A : Optional[int] = vocab_size
_A : Optional[Any] = hidden_size
_A : Optional[Any] = num_hidden_layers
_A : Optional[int] = num_attention_heads
_A : Dict = intermediate_size
_A : List[str] = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Tuple = max_position_embeddings
_A : Tuple = type_vocab_size
_A : List[Any] = type_sequence_label_size
_A : Union[str, Any] = initializer_range
_A : Tuple = num_choices
def a__ ( self ) -> Optional[int]:
_A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : List[str] = None
if self.use_attention_mask:
_A : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_A : str = None
if self.use_token_type_ids:
_A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> int:
_A : Union[str, Any] = self.prepare_config_and_inputs()
_A , _A , _A , _A : List[str] = config_and_inputs
_A : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def a__ ( self ) -> int:
_A : Tuple = self.prepare_config_and_inputs()
_A , _A , _A , _A : int = config_and_inputs
_A : Dict = True
_A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = True
_a = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> Union[str, Any]:
_A : Optional[Any] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def a__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
_A : Union[str, Any] = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_a )
_A : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> List[str]:
_A : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_a )
_A : Union[str, Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
_A : int = model(_a )[0]
_A : Tuple = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) , _a )
# compare the actual values for a slice.
_A : str = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
@slow
def a__ ( self ) -> Optional[Any]:
_A : str = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_a )
_A : Tuple = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
_A : Dict = model(_a )[0]
# compare the actual values for a slice.
_A : Dict = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 343 |
def lowerCAmelCase_ ( snake_case_ = 1000 ):
_A : List[Any] = 3
_A : Tuple = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:  # unreachable: any multiple of 15 is already caught by the a % 3 test above
            result -= a
a += 1
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowerCAmelCase_ ( snake_case_ ):
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = create_tensor(snake_case_ )
_A : Any = gather(snake_case_ )
assert gathered_tensor.tolist() == list(range(1,state.num_processes**2 + 1 ) )
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = [state.process_index]
_A : str = gather_object(snake_case_ )
assert len(snake_case_ ) == state.num_processes, f'''{gathered_obj}, {len(snake_case_ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def lowerCAmelCase_ ( snake_case_ ):
_A : int = create_tensor(snake_case_ )
_A : List[str] = broadcast(snake_case_ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1,state.num_processes + 1 ) )
def lowerCAmelCase_ ( snake_case_ ):
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
_A : Any = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_A : str = torch.arange(state.num_processes ).to(state.device )
_A : int = pad_across_processes(snake_case_ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0,state.num_processes ) ) + [0]
def lowerCAmelCase_ ( snake_case_ ):
# For now runs on only two processes
if state.num_processes != 2:
return
_A : str = create_tensor(snake_case_ )
_A : List[str] = reduce(snake_case_,"""sum""" )
_A : Dict = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(snake_case_,snake_case_ ), f'''{reduced_tensor} != {truth_tensor}'''
def lowerCAmelCase_ ( snake_case_ ):
# For now runs on only two processes
if state.num_processes != 2:
return
_A : int = create_tensor(snake_case_ )
_A : int = reduce(snake_case_,"""mean""" )
_A : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(snake_case_,snake_case_ ), f'''{reduced_tensor} != {truth_tensor}'''
def lowerCAmelCase_ ( snake_case_ ):
# For xla_spawn (TPUs)
main()
def lowerCAmelCase_ ( ):
_A : Optional[int] = PartialState()
state.print(f'''State: {state}''' )
state.print("""testing gather""" )
test_gather(snake_case_ )
state.print("""testing gather_object""" )
test_gather_object(snake_case_ )
state.print("""testing broadcast""" )
test_broadcast(snake_case_ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(snake_case_ )
state.print("""testing reduce_sum""" )
test_reduce_sum(snake_case_ )
state.print("""testing reduce_mean""" )
test_reduce_mean(snake_case_ )
if __name__ == "__main__":
main()
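# These collectives only do real work under several processes; a typical
# launch (script path illustrative) is `accelerate launch --num_processes 2
# this_script.py`. In a single process the ops degrade gracefully, e.g.:
from accelerate import PartialState
from accelerate.utils.operations import gather
import torch
single = PartialState()                    # one CPU/GPU process here
t = torch.tensor([1.0, 2.0], device=single.device)
assert gather(t).tolist() == [1.0, 2.0]    # gather is the identity with one process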
| 343 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> List[Any]:
_A : Tuple = parent
_A : Any = batch_size
_A : int = image_size
_A : Tuple = num_channels
_A : List[Any] = num_stages
_A : Any = hidden_sizes
_A : Union[str, Any] = depths
_A : Union[str, Any] = is_training
_A : Tuple = use_labels
_A : Optional[Any] = intermediate_size
_A : Union[str, Any] = hidden_act
_A : Any = num_labels
_A : List[str] = initializer_range
_A : str = out_features
_A : int = out_indices
_A : List[Any] = scope
def a__ ( self ) -> str:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.num_labels )
_A : str = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> int:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> List[Any]:
_A : Union[str, Any] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> str:
_A : List[str] = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Optional[Any] = None
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> int:
_A : int = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : int = ConvNextModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> List[str]:
_A : Optional[int] = ConvNextModelTester(self )
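# The spatial arithmetic behind the shape assertions above: ConvNeXt downsamples
# by 4 in its patchify stem and by 2 at each subsequent stage, so with the
# tester's 32 x 32 inputs stage i produces 32 // (4 * 2**i) pixels per side.
image_size, num_stages = 32, 4
print([image_size // (4 * 2 ** i) for i in range(num_stages)])   # [8, 4, 2, 1]
# hence the 4 x 4 "stage2" feature maps and the 1 x 1 final hidden state checked above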
| 343 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = git.Repo(search_parent_directories=snake_case_ )
_A : int = {
"""repo_id""": str(snake_case_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(snake_case_,"""git_log.json""" ),"""w""" ) as f:
json.dump(snake_case_,snake_case_,indent=4 )
def lowerCAmelCase_ ( snake_case_ ):
if params.n_gpu <= 0:
_A : Optional[int] = 0
_A : Any = -1
_A : Dict = True
_A : List[str] = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
_A : str = int(os.environ["""WORLD_SIZE"""] )
_A : Any = int(os.environ["""N_GPU_NODE"""] )
_A : Union[str, Any] = int(os.environ["""RANK"""] )
# number of nodes / node ID
_A : int = params.world_size // params.n_gpu_per_node
_A : Optional[int] = params.global_rank // params.n_gpu_per_node
_A : List[Any] = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
_A : str = 1
_A : Union[str, Any] = 0
_A : List[str] = 0
_A : Dict = 0
_A : Union[str, Any] = 1
_A : Optional[int] = 1
_A : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
_A : Any = params.node_id == 0 and params.local_rank == 0
_A : Optional[int] = params.n_nodes > 1
# summary
_A : Union[str, Any] = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""",backend="""nccl""",)
def lowerCAmelCase_ ( snake_case_ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
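# The environment contract the initialization helper above expects when
# n_gpu > 1 is the one torch.distributed launchers provide. A plausible
# single-node, two-GPU invocation (values illustrative) would be:
#
#   WORLD_SIZE=2 N_GPU_NODE=2 N_NODES=1 NODE_RANK=0 \
#   python -m torch.distributed.launch --nproc_per_node=2 train.py
#
# Each worker then reads RANK (0 or 1 here) and derives n_nodes, node_id and
# is_master from it, exactly as the sanity checks above verify.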
| 343 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # no fast RoC-BERT tokenizer to import
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
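# The lazy-import pattern above in miniature: nothing listed in
# _import_structure is imported until an attribute is first accessed. The
# module and symbol names in this sketch (my_pkg, MyClass) are placeholders.
#
#   # my_pkg/__init__.py
#   import sys
#   from transformers.utils import _LazyModule
#   _import_structure = {"my_module": ["MyClass"]}
#   sys.modules[__name__] = _LazyModule(
#       __name__, globals()["__file__"], _import_structure, module_spec=__spec__
#   )
#   # `from my_pkg import MyClass` now triggers the import of my_pkg.my_module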
| 343 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase :
_a = 42
# setable values
_a = 42
_a = 42
_a = None
@classmethod
def a__ ( cls , _a , _a , _a ) -> Tuple:
return cls(common=_a , init_noise_sigma=_a , timesteps=_a )
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in FlaxKarrasDiffusionSchedulers]
_a = 42
@property
def a__ ( self ) -> Dict:
return True
@register_to_config
def __init__( self , _a = 1000 , _a = 0.0001 , _a = 0.02 , _a = "linear" , _a = None , _a = "fixed_small" , _a = True , _a = "epsilon" , _a = jnp.floataa , ) -> Tuple:
_A : Tuple = dtype
def a__ ( self , _a = None ) -> DDPMSchedulerState:
if common is None:
_A : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
_A : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_a , init_noise_sigma=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a = None ) -> jnp.ndarray:
return sample
def a__ ( self , _a , _a , _a = () ) -> DDPMSchedulerState:
_A : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_A : Dict = (jnp.arange(0 , _a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a=None , _a=None ) -> Optional[int]:
_A : Optional[Any] = state.common.alphas_cumprod[t]
_A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_A : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_A : Optional[Any] = jnp.clip(_a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_A : Any = jnp.log(jnp.clip(_a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_A : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A : str = variance
_A : Union[str, Any] = state.common.betas[t]
_A : Tuple = (predicted_variance + 1) / 2
_A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self , _a , _a , _a , _a , _a = None , _a = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A : Dict = timestep
if key is None:
_A : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_A , _A : List[str] = jnp.split(_a , sample.shape[1] , axis=1 )
else:
_A : int = None
# 1. compute alphas, betas
_A : int = state.common.alphas_cumprod[t]
_A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A : Union[str, Any] = 1 - alpha_prod_t
_A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A : Union[str, Any] = jnp.clip(_a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_A : Tuple = jax.random.split(_a , num=1 )
_A : Dict = jax.random.normal(_a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_a , _a , predicted_variance=_a ) ** 0.5) * noise
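        # Hedged note: under jit, `jnp.where` traces both branches, so the noise term
        # below is always computed but only applied when t > 0; at t == 0 the
        # posterior mean is returned deterministically.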
_A : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_a , state=_a )
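    # Hedged usage sketch (call pattern assumed, not taken from this file): a
    # sampling loop would typically run, per timestep,
    #   out = scheduler.step(state, model_output, t, sample, key)
    #   sample, state = out.prev_sample, out.state
    # keeping every branch traceable so the whole loop stays jit-compatible.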
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return add_noise_common(state.common , _a , _a , _a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return get_velocity_common(state.common , _a , _a , _a )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
| 343 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
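# Note: _LazyModule defers the torch-backed imports until an attribute is first
# accessed, so importing this package stays cheap when optional dependencies are
# missing.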
| 343 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=0 ):
# Format the message.
if name is None:
_A : Union[str, Any] = None
else:
_A : Dict = """.""" * max(0,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
_A : Tuple = fmt.format(snake_case_ )
# Print and recurse (if needed).
if isinstance(snake_case_,snake_case_ ):
if msg is not None:
print(snake_case_ )
for k in val.keys():
recursive_print(snake_case_,val[k],spaces + 2 )
elif isinstance(snake_case_,torch.Tensor ):
print(snake_case_,""":""",val.size() )
else:
print(snake_case_,""":""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
_A : str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_A : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_A : Tuple = param.view(*snake_case_ )
_A : Any = param.transpose(0,2 )
_A : int = param.transpose(1,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_A : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_A : int = param.view(*snake_case_ )
_A : Any = param.transpose(0,1 ).contiguous()
_A : Optional[int] = param.view(*snake_case_ )
return param
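# Hedged shape walk-through (sizes illustrative): with checkpoint_version >= 2.0,
# num_heads=16, hidden_size=64 and num_splits=3, a fused QKV tensor of shape
# [16 * 3 * 64, d] is viewed as [16, 3, 64, d], transposed to [3, 16, 64, d], and
# flattened back, leaving Q, K and V as contiguous split-major blocks.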
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# The converted output model.
_A : Any = {}
# old versions did not store training args
_A : str = input_state_dict.get("""args""",snake_case_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
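    # Hedged mapping example: a Megatron key such as "layers.0.mlp.dense_h_to_4h.weight"
    # matches the regex above and, via this table, lands at
    # "transformer.h.0.mlp.c_fc.weight" (weights are additionally transposed further
    # down to match the Conv1D layout GPT2 uses).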
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.floataa ) ).view(
1,1,snake_case_,snake_case_ )
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
_A : List[str] = torch.tensor(-1e4,dtype=torch.floataa )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the matrix tied to the word embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
def lowerCAmelCase_ ( ):
# Create the argument parser.
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""",action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""",type=snake_case_,help="""Path to the checkpoint file (.zip archive or direct .pt file)""",)
parser.add_argument(
"""--config_file""",default="""""",type=snake_case_,help="""An optional config json file describing the pre-trained model.""",)
_A : Optional[int] = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
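        # Hedged note: these hard-coded sizes appear to match the 345M-parameter
        # ("medium") GPT-2 configuration NVIDIA released; pass --config_file to
        # convert checkpoints with other dimensions.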
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_A : Optional[Any] = convert_megatron_checkpoint(snake_case_,snake_case_,snake_case_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(snake_case_,snake_case_ )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
_A : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_A : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_A : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_A : Optional[Any] = """gpt2"""
_A : List[str] = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = type(snake_case_ ).__name__
_A : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(snake_case_ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(snake_case_ )
# Store the state_dict to file.
_A : Union[str, Any] = os.path.join(snake_case_,"""pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(snake_case_,snake_case_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 343 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = AudioLDMPipeline
_a = TEXT_TO_AUDIO_PARAMS
_a = TEXT_TO_AUDIO_BATCH_PARAMS
_a = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def a__ ( self ) -> Any:
torch.manual_seed(0 )
_A : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_a , )
_A : Tuple = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_A : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_A : Any = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_A : Optional[int] = ClapTextModelWithProjection(_a )
_A : Union[str, Any] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
_A : int = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_a , )
_A : Optional[int] = SpeechTaHifiGan(_a )
_A : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
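        # Hedged note: the keys above intentionally mirror AudioLDMPipeline's
        # constructor arguments, so the dict can be splatted as
        # AudioLDMPipeline(**components) in the tests below.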
def a__ ( self , _a , _a=0 ) -> Optional[Any]:
if str(_a ).startswith("""mps""" ):
_A : int = torch.manual_seed(_a )
else:
_A : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
_A : str = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def a__ ( self ) -> Tuple:
_A : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Tuple = self.get_dummy_components()
_A : List[Any] = AudioLDMPipeline(**_a )
_A : Any = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : List[str] = audioldm_pipe(**_a )
_A : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_a ) == 256
_A : Tuple = audio[:10]
_A : Union[str, Any] = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self ) -> Optional[int]:
_A : List[str] = self.get_dummy_components()
_A : int = AudioLDMPipeline(**_a )
_A : List[Any] = audioldm_pipe.to(_a )
_A : Any = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = self.get_dummy_inputs(_a )
_A : Any = 3 * [inputs["""prompt"""]]
# forward
_A : Tuple = audioldm_pipe(**_a )
_A : Optional[int] = output.audios[0]
_A : Union[str, Any] = self.get_dummy_inputs(_a )
_A : Optional[int] = 3 * [inputs.pop("""prompt""" )]
_A : Tuple = audioldm_pipe.tokenizer(
_a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , )
_A : List[Any] = text_inputs["""input_ids"""].to(_a )
_A : Dict = audioldm_pipe.text_encoder(
_a , )
_A : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_A : Optional[int] = F.normalize(_a , dim=-1 )
_A : List[Any] = prompt_embeds
# forward
_A : Dict = audioldm_pipe(**_a )
_A : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self ) -> List[str]:
_A : int = self.get_dummy_components()
_A : Union[str, Any] = AudioLDMPipeline(**_a )
_A : int = audioldm_pipe.to(_a )
_A : Optional[int] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : str = 3 * ["""this is a negative prompt"""]
_A : List[str] = negative_prompt
_A : Any = 3 * [inputs["""prompt"""]]
# forward
_A : Union[str, Any] = audioldm_pipe(**_a )
_A : Union[str, Any] = output.audios[0]
_A : str = self.get_dummy_inputs(_a )
_A : Dict = 3 * [inputs.pop("""prompt""" )]
_A : Any = []
for p in [prompt, negative_prompt]:
_A : Optional[int] = audioldm_pipe.tokenizer(
_a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , )
_A : Dict = text_inputs["""input_ids"""].to(_a )
_A : str = audioldm_pipe.text_encoder(
_a , )
_A : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_A : int = F.normalize(_a , dim=-1 )
embeds.append(_a )
_A , _A : str = embeds
# forward
_A : Dict = audioldm_pipe(**_a )
_A : Dict = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self ) -> Any:
_A : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Tuple = self.get_dummy_components()
_A : Tuple = PNDMScheduler(skip_prk_steps=_a )
_A : List[Any] = AudioLDMPipeline(**_a )
_A : int = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : List[str] = """egg cracking"""
_A : Dict = audioldm_pipe(**_a , negative_prompt=_a )
_A : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_a ) == 256
_A : Tuple = audio[:10]
_A : Union[str, Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self ) -> List[str]:
_A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : List[str] = self.get_dummy_components()
_A : Any = PNDMScheduler(skip_prk_steps=_a )
_A : Tuple = AudioLDMPipeline(**_a )
_A : List[Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : List[str] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_A : Tuple = audioldm_pipe(_a , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_A : int = 2
_A : str = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_A : Union[str, Any] = 2
_A : str = audioldm_pipe(_a , num_inference_steps=2 , num_waveforms_per_prompt=_a ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_A : List[Any] = 2
_A : Optional[int] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_a ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def a__ ( self ) -> Any:
_A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Optional[Any] = self.get_dummy_components()
_A : List[str] = AudioLDMPipeline(**_a )
_A : Optional[Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : Tuple = audioldm_pipe(audio_length_in_s=0.016 , **_a )
_A : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(_a ) / vocoder_sampling_rate == 0.016
_A : Union[str, Any] = audioldm_pipe(audio_length_in_s=0.032 , **_a )
_A : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_a ) / vocoder_sampling_rate == 0.032
def a__ ( self ) -> str:
_A : List[Any] = self.get_dummy_components()
_A : Optional[Any] = AudioLDMPipeline(**_a )
_A : Union[str, Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : str = ["""hey"""]
_A : Union[str, Any] = audioldm_pipe(_a , num_inference_steps=1 )
_A : Optional[int] = output.audios.shape
assert audio_shape == (1, 256)
_A : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_A : Dict = SpeechTaHifiGan(_a ).to(_a )
_A : Tuple = audioldm_pipe(_a , num_inference_steps=1 )
_A : Tuple = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def a__ ( self ) -> Any:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_a )
def a__ ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(test_mean_pixel_difference=_a )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a__ ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a )
@slow
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> int:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Any = np.random.RandomState(_a ).standard_normal((1, 8, 128, 16) )
_A : Dict = torch.from_numpy(_a ).to(device=_a , dtype=_a )
_A : List[str] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def a__ ( self ) -> Any:
_A : List[Any] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_A : Tuple = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Union[str, Any] = self.get_inputs(_a )
_A : Dict = 25
_A : Union[str, Any] = audioldm_pipe(**_a ).audios[0]
assert audio.ndim == 1
assert len(_a ) == 8_1920
_A : int = audio[7_7230:7_7240]
_A : Dict = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_A : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def a__ ( self ) -> Any:
_A : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_A : Union[str, Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_A : List[Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_inputs(_a )
_A : Optional[int] = audioldm_pipe(**_a ).audios[0]
assert audio.ndim == 1
assert len(_a ) == 8_1920
_A : Any = audio[2_7780:2_7790]
_A : int = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_A : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 343 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(UpperCamelCase__ )
class lowercase :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
                F'''There should be as many titles as texts but got {len(_a )} titles and {len(_a )} texts.''' )
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
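    # Hedged usage sketch (values illustrative): a call such as
    #   tokenizer(questions="who sang daydream believer", titles=["t1", "t2"], texts=["body 1", "body 2"])
    # duplicates the single question across both passages and yields `input_ids` of
    # shape (n_passages, sequence_length) plus a matching `attention_mask`.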
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
_A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
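    # Hedged reading of the loop above: documents are visited in decreasing
    # relevance order, spans are scored only over the passage text following the
    # second [SEP], and collection stops once `num_spans` predictions are gathered.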
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
_A : Tuple = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _A : Tuple = sorted(_a , key=lambda _a : _a[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
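    # Hedged worked example (logits assumed): with start_logits=[2, 0],
    # end_logits=[1, 3] and max_answer_length=2, the candidate spans (0, 0), (0, 1)
    # and (1, 1) score 3, 5 and 3; (0, 1) is kept first and the two overlapping
    # spans are then skipped by the interval check above.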
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ["input_ids", "attention_mask"]
| 343 | 1 |