"""Patience sort: deal elements into piles, then k-way merge the piles."""
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort ``collection`` in place and return it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
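# Worked example of the pile phase: for [5, 1, 4, 2, 3] the stacks evolve as
#   5 -> [5]                  (new pile)
#   1 -> [5, 1]               (a pile top >= 1 exists)
#   4 -> [5, 1] [4]           (no pile top >= 4)
#   2 -> [5, 1] [4, 2]
#   3 -> [5, 1] [4, 2] [3]
# Each pile is non-increasing, so reversing the piles yields sorted runs, and the
# heapq.merge of those runs is the fully sorted list [1, 2, 3, 4, 5].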
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """After each evaluation, also evaluate on the training set under the "train" prefix."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
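# Example invocation (the script name is illustrative; flags mirror the argparse
# defaults above, and report_to="wandb" assumes a configured wandb login):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --learning_rate 5e-4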
"""Scripted housekeeping: warn on and close stale issues in huggingface/transformers."""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issues that stayed quiet for a week after the bot's stale warning.
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Warn on issues at least 30 days old and inactive for more than 23 days.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
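# Running this requires a GITHUB_TOKEN environment variable with access to the
# repository; upstream, the script is triggered from a scheduled CI workflow.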
# Usage:
# ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
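# Resulting layout (one card per checkpoint, derived from the loop above):
#   model_cards/allenai/wmt16-en-de-dist-12-1/README.md
#   model_cards/allenai/wmt16-en-de-dist-6-1/README.md
#   model_cards/allenai/wmt16-en-de-12-1/README.md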
import copy
from typing import Optional  # used by the annotated docstring assignment below

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
RAG_CONFIG_DOC = __snake_case  # the configuration docstring literal defined above


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config, generator_config, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
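# Usage sketch (model types are illustrative; assumes standard transformers APIs):
#   from transformers import AutoConfig, RagConfig
#   q_enc = AutoConfig.for_model("dpr")
#   gen = AutoConfig.for_model("bart")
#   rag = RagConfig.from_question_encoder_generator_configs(q_enc, gen, n_docs=5)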
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
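# Special-token layout produced by build_inputs_with_special_tokens above:
#   single sequence: <s> A </s>
#   pair:            <s> A </s> </s> B </s>
# create_token_type_ids_from_sequences returns all zeros in both cases, since MVP
# (like BART, whose fast tokenizer it mirrors) does not use token type ids.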
"""Linear congruential pseudo-random number generator."""
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Generate and return the next pseudorandom number in the sequence."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
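# The generator implements the recurrence X_{n+1} = (a * X_n + c) mod m; the demo
# uses the widely cited constants a=1664525, c=1013904223 with m = 2 << 31 = 2**32.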
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        self.tokenizer_integration_test_util(
            expected_encoding=EXPECTED_ENCODING,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )


# The expected integration encoding is kept at module level because of its size.
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
EXPECTED_ENCODING = _a  # alias for the encoding dict assigned in the fmt:off block above


@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = M2M100Tokenizer.from_pretrained(cls.checkpoint_name, src_lang="en", tgt_lang="fr")
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k, v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check whether assigning `color` conflicts with any already-colored neighbour."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
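# A minimal sketch: 2-coloring a 4-cycle given as an adjacency matrix (the input
# format assumed by `color` above); a valid answer is [0, 1, 0, 1].
if __name__ == "__main__":
    cycle_graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle_graph, 2))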
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
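# Usage sketch: the defaults above correspond to the base architecture, e.g.
#   config = Wav2Vec2Config()
#   config.inputs_to_logits_ratio  # -> 320, the product of the conv strides
# i.e. one CTC logit is emitted per 320 input samples (20 ms of audio at 16 kHz).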
"""Quine-McCluskey minimization of boolean functions."""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in exactly one position,
    replacing that position with '_'; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge minterms; the terms that never merge are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart, then greedily
    cover the remaining minterms."""
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one implicant marks that implicant as essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover for the rest: repeatedly take the implicant covering most columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
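# Merging example for `compare_string`: "001" and "011" differ only in the middle
# bit, so they combine into the implicant "0_1"; "001" and "110" differ in three
# positions and return False (no merge).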
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
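# Worked examples (0b1101 == 13):
#   set_bit(0b1101, 1)    -> 0b1111 == 15
#   clear_bit(0b1111, 1)  -> 0b1101 == 13
#   flip_bit(0b1101, 1)   -> 0b1111 == 15
#   is_bit_set(0b1010, 1) -> True
#   get_bit(0b1010, 0)    -> 0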
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Graph vertex tracking the Prim key, parent (pi), neighbors and edge weights."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """O(V^2) Prim: extract the minimum-key vertex with a linear scan each round."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Heap-based Prim: re-heapify after each key decrease."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
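# Minimal usage sketch (weights illustrative): a triangle where the MST keeps the
# two unit edges and drops the weight-2 edge.
if __name__ == "__main__":
    triangle = [Vertex(i) for i in range(3)]
    connect(triangle, 1, 2, 1)
    connect(triangle, 2, 3, 1)
    connect(triangle, 1, 3, 2)
    print(prim(triangle, triangle[0]))  # -> [(2, 1), (3, 2)]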
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the redundant usage prefix for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
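# Usage sketch: each converter plugs into the prompt helpers, e.g.
#   precision = _ask_options(
#       "Do you wish to use mixed precision?",
#       ["no", "fp16", "bf16", "fp8"],
#       _convert_mixed_precision,
#   )
# which returns the PrecisionType value for the selected menu index.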
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
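# Note: _LazyModule defers the heavy torch/TF imports until a symbol is first
# accessed, so importing this package stays cheap; the TYPE_CHECKING branch above
# gives static analyzers the full symbol table without that runtime cost.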
def simplify(current_set: list[list]) -> list[list]:
    """Perform one Gaussian-elimination round and recurse on the reduced system."""
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(next_iteration)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n lists of n+1 coefficients."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
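# How it works: simplify() runs one elimination round (normalize by the leading
# coefficient, subtract the first row) and recurses on the (n-1)-variable
# subsystem; solve_simultaneous() then back-substitutes from the last simplified
# row upward. Example: solve_simultaneous([[1, 2, 3], [2, 1, 3]]) returns
# [1.0, 1.0] for the system x + 2y = 3, 2x + y = 3.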
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class A ( a ):
__UpperCAmelCase : str = ["""vqvae"""]
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Optional[int]:
super().__init__()
self.register_modules(unet=snake_case_ , scheduler=snake_case_ , mel=snake_case_ , vqvae=snake_case_ )
def __lowerCAmelCase ( self ) -> int:
return 5_0 if isinstance(self.scheduler , snake_case_ ) else 1_0_0_0
@torch.no_grad()
def __call__( self , snake_case_ = 1 , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = 0 , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_a = steps or self.get_default_steps()
self.scheduler.set_timesteps(snake_case_ )
_a = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_a = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_a = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=snake_case_ , device=self.device , )
_a = noise
_a = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(snake_case_ , snake_case_ )
_a = self.mel.audio_slice_to_image(snake_case_ )
_a = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_a = (input_image / 2_5_5) * 2 - 1
_a = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_a = self.vqvae.encode(torch.unsqueeze(snake_case_ , 0 ) ).latent_dist.sample(
generator=snake_case_ )[0]
_a = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_a = self.scheduler.add_noise(snake_case_ , snake_case_ , self.scheduler.timesteps[start_step - 1] )
_a = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_a = int(mask_start_secs * pixels_per_second )
_a = int(mask_end_secs * pixels_per_second )
_a = self.scheduler.add_noise(snake_case_ , snake_case_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , snake_case_ ):
_a = self.unet(snake_case_ , snake_case_ , snake_case_ )["sample"]
else:
_a = self.unet(snake_case_ , snake_case_ )["sample"]
if isinstance(self.scheduler , snake_case_ ):
_a = self.scheduler.step(
model_output=snake_case_ , timestep=snake_case_ , sample=snake_case_ , eta=snake_case_ , generator=snake_case_ , )["prev_sample"]
else:
_a = self.scheduler.step(
model_output=snake_case_ , timestep=snake_case_ , sample=snake_case_ , generator=snake_case_ , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_a = mask[:, step, :, :mask_start]
if mask_end > 0:
_a = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_a = 1 / self.vqvae.config.scaling_factor * images
_a = self.vqvae.decode(snake_case_ )["sample"]
_a = (images / 2 + 0.5).clamp(0 , 1 )
_a = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_a = (images * 2_5_5).round().astype("uint8" )
_a = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(snake_case_ , mode="RGB" ).convert("L" ) for _ in images) )
_a = [self.mel.image_to_audio(snake_case_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(snake_case_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(snake_case_ ) )
@torch.no_grad()
def encode( self , images , steps = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(steps )
sample = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
sample = (sample / 2_5_5) * 2 - 1
sample = torch.Tensor(sample ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
alpha_prod_t = self.scheduler.alphas_cumprod[t]
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
beta_prod_t = 1 - alpha_prod_t
model_output = self.unet(sample , t )["sample"]
pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def slerp( xa , xa_ , alpha ) -> torch.Tensor:
theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xa_ ) ) / torch.norm(xa ) / torch.norm(xa_ ) )
return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xa_ / sin(theta )
| 691 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing
return lowerCamelCase__.sum()
def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing
return lowerCamelCase__ + 1
@dataclass
class A :
x : int
y : str
class PyUtilsTest ( TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(ValueError ): # can't pickle a local lambda
map_nested(lambda snake_case_ : snake_case_ + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
class Foo :
my_attr = "bar"
foo = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(foo , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc ( iterable_length : Any, num_proc : Dict, expected_num_proc : Optional[int] ):
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
data_struct = {F'''{i}''': i for i in range(iterable_length )}
_a = map_nested(lambda x : x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest ( TestCase ):
@require_tf
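# identical temp_seed contexts must yield identical outputs; a later unseeded call must differ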
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
model = layers.Dense(2 )
def gen_random_output():
x = tf.random.uniform((1, 3) )
return model(x ).numpy()
with temp_seed(4_2 , set_tensorflow=True ):
outa = gen_random_output()
with temp_seed(4_2 , set_tensorflow=True ):
outb = gen_random_output()
outc = gen_random_output()
np.testing.assert_equal(outa , outb )
self.assertGreater(np.abs(outa - outc ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
model = torch.nn.Linear(3 , 2 )
x = torch.rand(1 , 3 )
return model(x ).detach().numpy()
with temp_seed(4_2 , set_pytorch=True ):
outa = gen_random_output()
with temp_seed(4_2 , set_pytorch=True ):
outb = gen_random_output()
outc = gen_random_output()
np.testing.assert_equal(outa , outb )
self.assertGreater(np.abs(outa - outc ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
outa = gen_random_output()
with temp_seed(4_2 ):
outb = gen_random_output()
outc = gen_random_output()
np.testing.assert_equal(outa , outb )
self.assertGreater(np.abs(outa - outc ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def test_nested_data_structure_data ( input_data : Any ):
output_data = NestedDataStructure(input_data ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def test_flatten ( data : List[Any], expected_output : Dict ):
output = NestedDataStructure(data ).flatten()
assert output == expected_output
def test_asdict ( ):
input_ = A(x=1, y="foobar" )
expected_output = {"x": 1, "y": "foobar"}
assert asdict(input_ ) == expected_output
nested = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(nested ) == expected_output
with pytest.raises(TypeError ):
asdict([1, A(x=10, y="foo" )] )
def _split_text ( text : str ):
return text.split()
def _aseconds_generator_of_aitems_with_timing ( content : List[Any] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def test_iflatmap_unordered ( ):
with Pool(2 ) as pool:
out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(out ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(out ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
out = []
for yield_time, content in iflatmap_unordered(
pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(content )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(out ) == 4
| 691 | 1 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
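# The lookup helpers below pull individual weight matrices (attention, MLP, layer norm) out of the flattened T5X parameter dict.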
def tax_attention_lookup ( params , i , prefix , layer_name="attention" ):
k = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
o = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
q = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
v = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def tax_mlp_lookup ( params , i , prefix , split_mlp_wi=False ):
if split_mlp_wi:
wi_a = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
wi_b = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
wi = (wi_a, wi_b)
else:
wi = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
wo = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def tax_layer_norm_lookup ( params , i , prefix , layer_name ):
return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def convert_tax_to_pytorch ( variables : dict, *, num_layers : int, is_encoder_only : bool ):
old = traverse_util.flatten_dict(variables["target"] )
old = {"/".join(k ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:", split_mlp_wi )
new = collections.OrderedDict()
# Shared embeddings.
new["shared.weight"] = old["token_embedder/embedding"]
# Encoder.
for i in range(num_layers ):
# Block i, layer 0 (Self Attention).
layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm" )
k , o , q , v = tax_attention_lookup(old, i, "encoder", "attention" )
new[F'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
new[F'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
new[F'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
new[F'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
new[F'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
# Block i, layer 1 (MLP).
layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm" )
wi , wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi )
new[F'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
if split_mlp_wi:
new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
else:
new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
new[F'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T
new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
"encoder/relpos_bias/rel_embedding"
].T
new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(num_layers ):
# Block i, layer 0 (Self Attention).
layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm" )
k , o , q , v = tax_attention_lookup(old, i, "decoder", "self_attention" )
new[F'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
new[F'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
new[F'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
new[F'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
new[F'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
# Block i, layer 1 (Cross Attention).
layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm" )
k , o , q , v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention" )
new[F'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
new[F'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
new[F'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
new[F'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
new[F'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T
# Block i, layer 2 (MLP).
layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm" )
wi , wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi )
new[F'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
if split_mlp_wi:
new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
else:
new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
new[F'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T
new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict ( converted_params : Optional[Any], is_encoder_only : bool ):
state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
state_dict["lm_head.weight"] = state_dict["shared.weight"]
return state_dict
def load_tax_weights_in_ta ( model : List[Any], config : Optional[Any], tax_checkpoint_path : List[str], is_encoder_only : Tuple ):
variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only )
state_dict = make_state_dict(converted, is_encoder_only )
model.load_state_dict(state_dict, strict=True )
def convert_tax_checkpoint_to_pytorch ( tax_checkpoint_path : Optional[int], config_file : str, pytorch_dump_path : Optional[Any], is_encoder_only : bool = False ):
config = TaConfig.from_json_file(config_file )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
model = TaEncoderModel(config )
else:
model = TaForConditionalGeneration(config )
# Load weights from tf checkpoint
load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(pytorch_dump_path )
# Verify that we can load the checkpoint.
model.from_pretrained(pytorch_dump_path )
print("Done" )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
__snake_case : Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 691 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a )
class RagConfig ( PretrainedConfig ):
model_type = """rag"""
is_composition = True
def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=3_0_0 , retrieval_vector_size=7_6_8 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ) -> Optional[Any]:
super().__init__(
bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
question_encoder_config = kwargs.pop("question_encoder" )
question_encoder_model_type = question_encoder_config.pop("model_type" )
decoder_config = kwargs.pop("generator" )
decoder_model_type = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )
self.reduce_loss = reduce_loss
self.label_smoothing = label_smoothing
self.exclude_bos_score = exclude_bos_score
self.do_marginalize = do_marginalize
self.title_sep = title_sep
self.doc_sep = doc_sep
self.n_docs = n_docs
self.max_combined_length = max_combined_length
self.dataset = dataset
self.dataset_split = dataset_split
self.index_name = index_name
self.retrieval_vector_size = retrieval_vector_size
self.retrieval_batch_size = retrieval_batch_size
self.passages_path = passages_path
self.index_path = index_path
self.use_dummy_dataset = use_dummy_dataset
self.output_retrieved = output_retrieved
self.do_deduplication = do_deduplication
self.use_cache = use_cache
if self.forced_eos_token_id is None:
self.forced_eos_token_id = getattr(self.generator , "forced_eos_token_id" , None )
@classmethod
def from_question_encoder_generator_configs( cls , question_encoder_config , generator_config , **kwargs ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
def to_dict( self ) -> Optional[int]:
output = copy.deepcopy(self.__dict__ )
output["question_encoder"] = self.question_encoder.to_dict()
output["generator"] = self.generator.to_dict()
output["model_type"] = self.__class__.model_type
return output
| 691 | 1 |
'''simple docstring'''
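# Recursive exponentiation; the __main__ block below handles negative exponents by inverting the result.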
def power ( base : int, exponent : int ) -> float:
return base * power(base, exponent - 1 ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
base = int(input("Enter the base: ").strip())
exponent = int(input("Enter the exponent: ").strip())
result = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
result = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 691 |
'''simple docstring'''
class Graph :
def __init__( self ) -> List[str]:
self.num_vertices = 0
self.num_edges = 0
self.adjacency = {}
def add_vertex( self , vertex ) -> int:
if vertex not in self.adjacency:
self.adjacency[vertex] = {}
self.num_vertices += 1
def add_edge( self , head , tail , weight ) -> Optional[int]:
self.add_vertex(head )
self.add_vertex(tail )
if head == tail:
return
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def distinct_weight( self ) -> Union[str, Any]:
edges = self.get_edges()
for edge in edges:
head , tail , weight = edge
edges.remove((tail, head, weight) )
for i in range(len(edges ) ):
edges[i] = list(edges[i] )
edges.sort(key=lambda e : e[2] )
for i in range(len(edges ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
edges[i + 1][2] = edges[i][2] + 1
for edge in edges:
head , tail , weight = edge
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def __str__( self ) -> Optional[int]:
string = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
weight = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def get_edges( self ) -> Optional[Any]:
output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def get_vertices( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def build( vertices=None , edges=None ) -> Any:
g = Graph()
if vertices is None:
vertices = []
if edges is None:
edges = []
for vertex in vertices:
g.add_vertex(vertex )
for edge in edges:
g.add_edge(*edge )
return g
class UnionFind :
def __init__( self ) -> Optional[int]:
self.parent = {}
self.rank = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def make_set( self , item ) -> Optional[int]:
if item in self.parent:
return self.find(item )
self.parent[item] = item
self.rank[item] = 0
return item
def find( self , item ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(item )
if item != self.parent[item]:
self.parent[item] = self.find(self.parent[item] )
return self.parent[item]
def union( self , itema , itemb ) -> Optional[int]:
roota = self.find(itema )
rootb = self.find(itemb )
if roota == rootb:
return roota
if self.rank[roota] > self.rank[rootb]:
self.parent[rootb] = roota
return roota
if self.rank[roota] < self.rank[rootb]:
self.parent[roota] = rootb
return rootb
if self.rank[roota] == self.rank[rootb]:
self.rank[roota] += 1
self.parent[rootb] = roota
return roota
return None
@staticmethod
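# Boruvka's algorithm: every round, each component picks its cheapest outgoing edge and components are merged, so the component count drops until one spanning tree remains.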
def boruvka_mst( graph ) -> Tuple:
num_components = graph.num_vertices
union_find = UnionFind()
mst_edges = []
while num_components > 1:
cheap_edge = {}
for vertex in graph.get_vertices():
cheap_edge[vertex] = -1
edges = graph.get_edges()
for edge in edges:
head , tail , weight = edge
edges.remove((tail, head, weight) )
for edge in edges:
head , tail , weight = edge
seta = union_find.find(head )
setb = union_find.find(tail )
if seta != setb:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
cheap_edge[seta] = [head, tail, weight]
if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
cheap_edge[setb] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
head , tail , weight = cheap_edge[vertex]
if union_find.find(head ) != union_find.find(tail ):
union_find.union(head , tail )
mst_edges.append(cheap_edge[vertex] )
num_components = num_components - 1
mst = Graph.build(edges=mst_edges )
return mst
| 691 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
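# get_dummy_components() builds a tiny AltDiffusion pipeline (small UNet/VAE/text encoder) so these tests run quickly on CPU.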
class AltDiffusionPipelineFastTests ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = AltDiffusionPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ) -> Optional[Any]:
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
scheduler = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
tokenizer.model_max_length = 7_7
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ) -> Dict:
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __lowerCAmelCase ( self ) -> str:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self ) -> Tuple:
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
torch.manual_seed(0 )
_a = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_a = RobertaSeriesModelWithTransformation(snake_case_ )
_a = text_encoder
_a = AltDiffusionPipeline(**snake_case_ )
_a = alt_pipe.to(snake_case_ )
alt_pipe.set_progress_bar_config(disable=snake_case_ )
_a = self.get_dummy_inputs(snake_case_ )
_a = "A photo of an astronaut"
_a = alt_pipe(**snake_case_ )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> List[str]:
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = PNDMScheduler(skip_prk_steps=snake_case_ )
torch.manual_seed(0 )
_a = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_a = RobertaSeriesModelWithTransformation(snake_case_ )
_a = text_encoder
_a = AltDiffusionPipeline(**snake_case_ )
_a = alt_pipe.to(snake_case_ )
alt_pipe.set_progress_bar_config(disable=snake_case_ )
_a = self.get_dummy_inputs(snake_case_ )
_a = alt_pipe(**snake_case_ )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests ( unittest.TestCase ):
def tearDown( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Tuple:
# make sure here that pndm scheduler skips prk
_a = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=snake_case_ )
_a = alt_pipe.to(snake_case_ )
alt_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = torch.manual_seed(0 )
_a = alt_pipe([prompt] , generator=snake_case_ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="np" )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_a = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
_a = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=snake_case_ , safety_checker=snake_case_ )
_a = alt_pipe.to(snake_case_ )
alt_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = torch.manual_seed(0 )
_a = alt_pipe([prompt] , generator=snake_case_ , num_inference_steps=2 , output_type="numpy" )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_a = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 691 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
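# The fixtures below create small local files so cached_path can be exercised without network access.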
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path ( tmp_path_factory : Optional[Any] ):
path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
data = bytes(FILE_CONTENT, "utf-8" )
with zstd.open(path, "wb" ) as f:
f.write(data )
return path
@pytest.fixture
def tmpfs_file ( tmpfs : int ):
with open(os.path.join(tmpfs.local_root_dir, FILE_PATH ), "w" ) as f:
f.write(FILE_CONTENT )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def test_cached_path_extract ( compression_format : str, gz_file : Optional[int], xz_file : Optional[int], zstd_path : List[str], tmp_path : Union[str, Any], text_file : Dict ):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
input_path = input_paths[compression_format]
cache_dir = tmp_path / "cache"
download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True )
extracted_path = cached_path(input_path, download_config=download_config )
with open(extracted_path ) as f:
extracted_file_content = f.read()
with open(text_file ) as f:
expected_file_content = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def test_extracted_datasets_path ( default_extracted : Union[str, Any], default_cache_dir : List[Any], xz_file : List[str], tmp_path : List[str], monkeypatch : List[str] ):
custom_cache_dir = "custom_cache"
custom_extracted_dir = "custom_extracted_dir"
custom_extracted_path = tmp_path / "custom_extracted_path"
if default_extracted:
expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path ) )
expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
filename = xz_file
download_config = (
DownloadConfig(extract_compressed_file=True )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True )
)
extracted_file_path = cached_path(filename, download_config=download_config )
assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local ( text_file : Union[str, Any] ):
# absolute path
text_file = str(Path(text_file ).resolve() )
assert cached_path(text_file ) == text_file
# relative path
text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(text_file ) == text_file
def test_cached_path_missing_local ( tmp_path : Dict ):
# absolute path
missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(FileNotFoundError ):
cached_path(missing_file )
# relative path
missing_file = "./__missing_file__.txt"
with pytest.raises(FileNotFoundError ):
cached_path(missing_file )
def test_get_from_cache_fsspec ( tmpfs_file : Union[str, Any] ):
output_file = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(output_file ) as f:
output_file_content = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_cached_path_offline ( ):
with pytest.raises(OfflineModeIsEnabled ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_http_offline ( tmp_path_factory : Union[str, Any] ):
filename = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(OfflineModeIsEnabled ):
http_get("https://huggingface.co", temp_file=filename )
with pytest.raises(OfflineModeIsEnabled ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_ftp_offline ( tmp_path_factory : Union[str, Any] ):
filename = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(OfflineModeIsEnabled ):
ftp_get("ftp://huggingface.co", temp_file=filename )
with pytest.raises(OfflineModeIsEnabled ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_fsspec_offline ( tmp_path_factory : Optional[Any] ):
filename = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(OfflineModeIsEnabled ):
fsspec_get("s3://huggingface.co", temp_file=filename )
with pytest.raises(OfflineModeIsEnabled ):
fsspec_head("s3://huggingface.co" )
| 691 | 1 |
'''simple docstring'''
import math
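# Proth numbers have the form k * 2^n + 1 with k odd and k < 2^n; the sequence starts 3, 5, 9, 13, 17, ...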
def proth ( number : int ) -> int:
if not isinstance(number, int ):
msg = F'''Input value of [number={number}] must be an integer'''
raise TypeError(msg )
if number < 1:
msg = F'''Input value of [number={number}] must be > 0'''
raise ValueError(msg )
elif number == 1:
return 3
elif number == 2:
return 5
else:
block_index = int(math.log(number // 3, 2 ) ) + 2
proth_list = [3, 5]
proth_index = 2
increment = 3
for block in range(1, block_index ):
for _ in range(increment ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
value = 0
try:
value = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
| 691 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
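# replace_keys() below renames the original SAM checkpoint tensors to the Hugging Face SamModel layout using this mapping.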
def replace_keys ( state_dict : List[Any] ):
model_state_dict = {}
state_dict.pop("pixel_mean", None )
state_dict.pop("pixel_std", None )
output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
key = key.replace(key_to_modify, new_key )
if re.match(output_hypernetworks_mlps_pattern, key ):
layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key ).group(2 ) )
if layer_nb == 0:
key = key.replace("layers.0", "proj_in" )
elif layer_nb == 1:
key = key.replace("layers.1", "layers.0" )
elif layer_nb == 2:
key = key.replace("layers.2", "proj_out" )
model_state_dict[key] = value
model_state_dict["shared_embedding.positional_embedding"] = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def convert_sam_checkpoint ( model_name : str, pytorch_dump_folder : Optional[int], push_to_hub : Tuple, model_hub_id : str="ybelkada/segment-anything" ):
checkpoint_path = hf_hub_download(model_hub_id, F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
config = SamConfig()
elif "sam_vit_l" in model_name:
vision_config = SamVisionConfig(
hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
config = SamConfig(
vision_config=vision_config, )
elif "sam_vit_h" in model_name:
vision_config = SamVisionConfig(
hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
config = SamConfig(
vision_config=vision_config, )
state_dict = torch.load(checkpoint_path, map_location="cpu" )
state_dict = replace_keys(state_dict )
image_processor = SamImageProcessor()
processor = SamProcessor(image_processor=image_processor )
hf_model = SamModel(config )
hf_model.load_state_dict(state_dict )
hf_model = hf_model.to("cuda" )
img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True ).raw ).convert("RGB" )
input_points = [[[400, 650]]]
input_labels = [[1]]
inputs = processor(images=np.array(raw_image ), return_tensors="pt" ).to("cuda" )
with torch.no_grad():
output = hf_model(**inputs )
scores = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
inputs = processor(
images=np.array(raw_image ), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
output = hf_model(**inputs )
scores = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
input_boxes = ((75, 275, 1_725, 850),)
inputs = processor(images=np.array(raw_image ), input_boxes=input_boxes, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
output = hf_model(**inputs )
scores = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
input_points = [[[400, 650], [800, 650]]]
input_labels = [[1, 1]]
inputs = processor(
images=np.array(raw_image ), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
output = hf_model(**inputs )
scores = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
__snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__snake_case : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
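# Three solutions to the "next greater element" problem: two O(n^2) scans and an O(n) stack-based pass.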
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow ( arr : list[float] ) -> list[float]:
result = []
arr_size = len(arr )
for i in range(arr_size ):
next_element = -1
for j in range(i + 1, arr_size ):
if arr[i] < arr[j]:
next_element = arr[j]
break
result.append(next_element )
return result
def next_greatest_element_fast ( arr : list[float] ) -> list[float]:
result = []
for i, outer in enumerate(arr ):
next_element = -1
for inner in arr[i + 1 :]:
if outer < inner:
next_element = inner
break
result.append(next_element )
return result
def next_greatest_element ( arr : list[float] ) -> list[float]:
arr_size = len(arr )
stack = []
result = [-1] * arr_size
for index in reversed(range(arr_size ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
result[index] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__snake_case : Optional[int] = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 691 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
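# betas_for_alpha_bar() discretizes a continuous alpha-bar schedule (cosine or exponential) into per-step betas.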
def betas_for_alpha_bar ( num_diffusion_timesteps , max_beta=0.9_99 , alpha_transform_type="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(t ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
betas = []
for i in range(num_diffusion_timesteps ):
ta = i / num_diffusion_timesteps
tb = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(tb ) / alpha_bar_fn(ta ), max_beta ) )
return torch.tensor(betas, dtype=torch.float32 )
class KDPM2DiscreteScheduler ( SchedulerMixin , ConfigMixin ):
_compatibles = [e.name for e in KarrasDiffusionSchedulers]
order = 2
@register_to_config
def __init__( self , num_train_timesteps = 1_0_0_0 , beta_start = 0.00_085 , beta_end = 0.012 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , timestep_spacing = "linspace" , steps_offset = 0 , ) -> Optional[int]:
if trained_betas is not None:
self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps )
else:
raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
def index_for_timestep( self , timestep , schedule_timesteps=None ) -> Dict:
if schedule_timesteps is None:
schedule_timesteps = self.timesteps
indices = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
pos = 1 if len(indices ) > 1 else 0
else:
timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
pos = self._index_counter[timestep_int]
return indices[pos].item()
@property
def init_noise_sigma( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def scale_model_input( self , sample , timestep , ) -> torch.FloatTensor:
step_index = self.index_for_timestep(timestep )
if self.state_in_first_order:
sigma = self.sigmas[step_index]
else:
sigma = self.sigmas_interpol[step_index]
sample = sample / ((sigma**2 + 1) ** 0.5)
return sample
def set_timesteps( self , num_inference_steps , device = None , num_train_timesteps = None , ) -> Union[str, Any]:
self.num_inference_steps = num_inference_steps
num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
elif self.config.timestep_spacing == "leading":
step_ratio = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
step_ratio = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
self.log_sigmas = torch.from_numpy(np.log(sigmas ) ).to(device )
sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
sigmas = torch.from_numpy(sigmas ).to(device=device )
# interpolate sigmas
sigmas_interpol = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
self.sigmas_interpol = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(device ).startswith("mps" ):
# mps does not support float64
timesteps = torch.from_numpy(timesteps ).to(device , dtype=torch.float32 )
else:
timesteps = torch.from_numpy(timesteps ).to(device )
# interpolate timesteps
timesteps_interpol = self.sigma_to_t(sigmas_interpol ).to(device , dtype=timesteps.dtype )
interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps] )
self.sample = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
self._index_counter = defaultdict(int )
def sigma_to_t( self , sigma ) -> Optional[int]:
# get log sigma
log_sigma = sigma.log()
# get distribution
dists = log_sigma - self.log_sigmas[:, None]
# get sigmas range
low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
high_idx = low_idx + 1
low = self.log_sigmas[low_idx]
high = self.log_sigmas[high_idx]
# interpolate sigmas
w = (low - log_sigma) / (low - high)
w = w.clamp(0 , 1 )
# transform interpolation to time range
t = (1 - w) * low_idx + w * high_idx
t = t.view(sigma.shape )
return t
@property
def state_in_first_order( self ) -> List[Any]:
return self.sample is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(snake_case_ )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas_interpol[step_index + 1]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas_interpol[step_index]
_a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_interpol - sigma_hat
# store for 2nd order step
_a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_a = sigma_next - sigma_hat
_a = self.sample
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
# mps does not support float64
schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
schedule_timesteps = self.timesteps.to(original_samples.device )
timesteps = timesteps.to(original_samples.device )
step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
sigma = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
sigma = sigma.unsqueeze(-1 )
noisy_samples = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> str:
return self.config.num_train_timesteps
| 691 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__snake_case : Tuple = logging.getLogger(__name__)
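# Each TokenClassificationTask subclass below knows how to read and write one CoNLL-style file format (NER, chunking, POS).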
class NER ( TokenClassificationTask ):
def __init__( self , label_idx=-1 ) -> Union[str, Any]:
# in NER datasets, the last column is usually reserved for NER label
self.label_idx = label_idx
def read_examples_from_file( self , data_dir , mode ) -> List[InputExample]:
if isinstance(mode , Split ):
mode = mode.value
file_path = os.path.join(data_dir , F'''{mode}.txt''' )
guid_index = 1
examples = []
with open(file_path , encoding="utf-8" ) as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=words , labels=labels ) )
guid_index += 1
words = []
labels = []
else:
splits = line.split(" " )
words.append(splits[0] )
if len(splits ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=words , labels=labels ) )
return examples
def write_predictions_to_file( self , writer , test_input_reader , preds_list ) -> int:
example_id = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(line )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
output_line = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(output_line )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
if path:
with open(snake_case_ , "r" ) as f:
_a = f.read().splitlines()
if "O" not in labels:
_a = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class A ( a ):
    def __init__( self ) -> None:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
if path:
with open(snake_case_ , "r" ) as f:
_a = f.read().splitlines()
if "O" not in labels:
_a = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class A ( a ):
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[InputExample]:
if isinstance(snake_case_ , snake_case_ ):
_a = mode.value
_a = os.path.join(snake_case_ , F'''{mode}.txt''' )
_a = 1
_a = []
with open(snake_case_ , encoding="utf-8" ) as f:
for sentence in parse_incr(snake_case_ ):
_a = []
_a = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(snake_case_ ) == len(snake_case_ )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=snake_case_ , labels=snake_case_ ) )
guid_index += 1
return examples
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Any:
_a = 0
for sentence in parse_incr(snake_case_ ):
_a = preds_list[example_id]
_a = ""
for token in sentence:
out += F'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(snake_case_ )
example_id += 1
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
if path:
with open(snake_case_ , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
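# Hedged usage sketch for the CoNLL-style reader above: the same parsing loop
# on an in-memory string instead of a file. The two-column "token label"
# layout is an assumption that matches the label_idx=-1 default.
def parse_conll_demo(text: str):
    sentences, words, labels = [], [], []
    for line in text.splitlines():
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if words:
                sentences.append((words, labels))
                words, labels = [], []
        else:
            splits = line.split()
            words.append(splits[0])
            labels.append(splits[-1])
    if words:
        sentences.append((words, labels))
    return sentences

# parse_conll_demo("EU B-ORG\nrejects O\n\nGerman B-MISC\n")
# -> [(['EU', 'rejects'], ['B-ORG', 'O']), (['German'], ['B-MISC'])]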
| 691 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
# Color current vertex
_a = i
# Validate coloring
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
return True
# Backtrack
_a = -1
return False
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
_a = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
return colored_vertices
return []
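# Hedged, self-contained restatement of the m-coloring backtracking above,
# using descriptive names (the original identifiers were normalized away).
# `graph` is an adjacency matrix; the function returns one coloring or [].
def color_graph(graph: list[list[int]], max_colors: int) -> list[int]:
    colors = [-1] * len(graph)

    def safe(vertex: int, c: int) -> bool:
        # No already-colored neighbour may share the candidate color.
        return all(not (adj and colors[nb] == c) for nb, adj in enumerate(graph[vertex]))

    def solve(vertex: int) -> bool:
        if vertex == len(graph):
            return True
        for c in range(max_colors):
            if safe(vertex, c):
                colors[vertex] = c
                if solve(vertex + 1):
                    return True
                colors[vertex] = -1  # backtrack
        return False

    return colors if solve(0) else []

# A triangle needs three colors, so two are not enough:
# color_graph([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 3) -> [0, 1, 2]
# color_graph([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 2) -> []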
| 691 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case : Dict = logging.getLogger(__name__)
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ):
return (preds == labels).mean()
@dataclass
class A :
__UpperCAmelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCAmelCase : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCAmelCase : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCAmelCase : Optional[str] = field(
default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class A :
__UpperCAmelCase : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
__UpperCAmelCase : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
__UpperCAmelCase : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCAmelCase : bool = field(
default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_a , _a , _a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", lowerCamelCase__ )
# Set seed
set_seed(training_args.seed )
try:
_a = processors[data_args.task_name]()
_a = processor.get_labels()
_a = len(lowerCamelCase__ )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=lowerCamelCase__, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
_a = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=lowerCamelCase__, cache_dir=model_args.cache_dir, )
# Get datasets
_a = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=lowerCamelCase__, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
_a = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=lowerCamelCase__, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase__ : EvalPrediction ) -> Dict:
_a = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(lowerCamelCase__, p.label_ids )}
# Data collator
_a = DataCollatorWithPadding(lowerCamelCase__, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=lowerCamelCase__, eval_dataset=lowerCamelCase__, compute_metrics=lowerCamelCase__, data_collator=lowerCamelCase__, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_a = trainer.evaluate()
_a = os.path.join(training_args.output_dir, "eval_results.txt" )
if trainer.is_world_master():
with open(lowerCamelCase__, "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s", lowerCamelCase__, lowerCamelCase__ )
writer.write("%s = %s\n" % (key, value) )
results.update(lowerCamelCase__ )
return results
def _lowercase ( lowerCamelCase__ : List[Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
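# Hedged invocation sketch for the script above. The script file name, task,
# and paths are placeholders, and the flag names follow the usual
# transformers example-script conventions (the normalized field names above
# no longer show them verbatim):
#
#   python run_multiple_choice.py \
#     --model_name_or_path bert-base-uncased \
#     --task_name swag \
#     --data_dir ./data/swag \
#     --output_dir ./results \
#     --do_train --do_eval \
#     --max_seq_length 128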
| 691 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A :
    def __init__( self , snake_case_ ) -> None:
_a = str(id_ )
_a = None
_a = None
_a = []
_a = {} # {vertex:distance}
    def __lt__( self , snake_case_ ) -> bool:
        return self.key < snake_case_.key
    def __repr__( self ) -> str:
        return self.id
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
self.neighbors.append(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
_a = weight
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
_a = []
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = graph[:]
while q:
_a = min(lowerCamelCase__ )
q.remove(lowerCamelCase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
for i in range(1, len(lowerCamelCase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = list(lowerCamelCase__ )
hq.heapify(lowerCamelCase__ )
while h:
_a = hq.heappop(lowerCamelCase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
hq.heapify(lowerCamelCase__ )
for i in range(1, len(lowerCamelCase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowercase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
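# Hedged, self-contained sketch of the same lazy-deletion idea behind the
# heap-based Prim's variant above, using plain dicts; the toy graph and its
# weights are made up for illustration.
import heapq

def prim_mst(adj: dict, start: str) -> list:
    visited = {start}
    frontier = [(w, start, v) for w, v in adj[start]]
    heapq.heapify(frontier)
    mst = []
    while frontier and len(visited) < len(adj):
        w, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry: v was already reached via a cheaper edge
        visited.add(v)
        mst.append((u, v))
        for weight, nxt in adj[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (weight, v, nxt))
    return mst

# g = {"a": [(1, "b"), (4, "c")], "b": [(1, "a"), (2, "c")], "c": [(4, "a"), (2, "b")]}
# prim_mst(g, "a") -> [('a', 'b'), ('b', 'c')]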
| 691 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
_a = inspect.getfile(accelerate.test_utils )
_a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
_a = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
_a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def __lowerCAmelCase ( self ) -> Union[str, Any]:
print(F'''Found {torch.cuda.device_count()} devices.''' )
_a = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def __lowerCAmelCase ( self ) -> str:
print(F'''Found {torch.cuda.device_count()} devices.''' )
_a = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def __lowerCAmelCase ( self ) -> str:
_a = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def __lowerCAmelCase ( self ) -> Optional[int]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
_a = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if __name__ == "__main__":
__snake_case : Union[str, Any] = Accelerator()
__snake_case : str = (accelerator.state.process_index + 2, 10)
__snake_case : str = torch.randint(0, 10, shape).to(accelerator.device)
__snake_case : int = ""
__snake_case : Any = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__snake_case : List[str] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__snake_case : Tuple = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
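# Hedged single-process illustration of the invariant checked above:
# pad_across_processes grows every rank's tensor to the largest first dim,
# padding with zeros at the end (default) or at the front (pad_first=True).
# The (process_index + 2, 10) shapes mirror the convention used above.
def _pad_to(t, target_dim0, pad_first=False):
    pad = torch.zeros(target_dim0 - t.shape[0], *t.shape[1:], dtype=t.dtype)
    return torch.cat([pad, t] if pad_first else [t, pad], dim=0)

# rank0 = torch.randint(0, 10, (2, 10))   # what process 0 would hold
# padded = _pad_to(rank0, 3)              # grown to process 1's first dim
# assert torch.equal(padded[:2], rank0) and torch.all(padded[2:] == 0)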
| 691 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class A :
    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> None: # noqa: B008
_a = multiplier
_a = increment
_a = modulo
_a = seed
    def __lowerCAmelCase ( self ) -> int:
        # One LCG step: (multiplier * seed + increment) mod modulo.
_a = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
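# Hedged companion demo: the same Numerical Recipes constants as the
# __main__ block above, written as a pure function so that determinism
# and the [0, modulo) output range are easy to assert.
def lcg_stream(seed: int, n: int) -> list[int]:
    a, c, m = 1664525, 1013904223, 2 << 31
    out = []
    for _ in range(n):
        seed = (a * seed + c) % m
        out.append(seed)
    return out

# assert lcg_stream(42, 5) == lcg_stream(42, 5)              # same seed, same stream
# assert all(0 <= x < (2 << 31) for x in lcg_stream(42, 5))  # bounded by the modulus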
| 691 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A :
def __init__( self , snake_case_ , snake_case_=3 , snake_case_=3_2 , snake_case_=3 , snake_case_=1_0 , snake_case_=[8, 1_6, 3_2, 6_4] , snake_case_=[1, 1, 2, 1] , snake_case_=True , snake_case_=True , snake_case_="relu" , snake_case_=3 , snake_case_=None , snake_case_=["stage2", "stage3", "stage4"] , snake_case_=[2, 3, 4] , snake_case_=1 , ) -> Any:
_a = parent
_a = batch_size
_a = image_size
_a = num_channels
_a = embeddings_size
_a = hidden_sizes
_a = depths
_a = is_training
_a = use_labels
_a = hidden_act
_a = num_labels
_a = scope
_a = len(snake_case_ )
_a = out_features
_a = out_indices
_a = num_groups
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.num_labels )
_a = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> str:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = BitModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
_a = self.num_labels
_a = BitForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
_a = BitBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a = None
_a = BitBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __lowerCAmelCase ( self ) -> int:
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = False
def __lowerCAmelCase ( self ) -> Dict:
_a = BitModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) -> Optional[int]:
return
@unittest.skip(reason="Bit does not output attentions" )
def __lowerCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self ) -> Any:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case_ )
def __lowerCAmelCase ( self ) -> str:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(config=snake_case_ )
for name, module in model.named_modules():
if isinstance(snake_case_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
_a = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_a = layer_type
_a = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def __lowerCAmelCase ( self ) -> List[Any]:
pass
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = BitModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _lowercase ( ):
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> List[Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __lowerCAmelCase ( self ) -> Any:
_a = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case_ )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_a = model(**snake_case_ )
# verify the logits
_a = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_a = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
@require_torch
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[str] = (BitBackbone,) if is_torch_available() else ()
__UpperCAmelCase : int = BitConfig
__UpperCAmelCase : int = False
def __lowerCAmelCase ( self ) -> List[str]:
_a = BitModelTester(self )
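# Hedged inference sketch mirroring the integration test above. The model id
# is an assumption (a commonly published BiT checkpoint) and the image path
# is a placeholder; running this needs network access and local files.
#
# from transformers import BitImageProcessor, BitForImageClassification
# from PIL import Image
# import torch
#
# processor = BitImageProcessor.from_pretrained("google/bit-50")
# model = BitForImageClassification.from_pretrained("google/bit-50")
# inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])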
| 691 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger("transformers.models.encodec")
__snake_case : Tuple = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__snake_case : int = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__snake_case : Optional[int] = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__snake_case : Tuple = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__snake_case : int = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__snake_case : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Tuple = []
__snake_case : Optional[int] = []
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
elif weight_type == "weight_ih_l0":
_a = value
elif weight_type == "weight_hh_l0":
_a = value
elif weight_type == "bias_ih_l0":
_a = value
elif weight_type == "bias_hh_l0":
_a = value
elif weight_type == "weight_ih_l1":
_a = value
elif weight_type == "weight_hh_l1":
_a = value
elif weight_type == "bias_ih_l1":
_a = value
elif weight_type == "bias_hh_l1":
_a = value
else:
_a = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ):
_a = []
if model_name == "encodec_24khz" or "encodec_32khz":
_a = MAPPING_24K
elif model_name == "encodec_48khz":
_a = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__, lowerCamelCase__ ):
logger.info(F'''{name} was ignored''' )
continue
_a = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
_a = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "weight_ih_l0" in name:
_a = "weight_ih_l0"
elif "weight_hh_l0" in name:
_a = "weight_hh_l0"
elif "bias_ih_l0" in name:
_a = "bias_ih_l0"
elif "bias_hh_l0" in name:
_a = "bias_hh_l0"
elif "weight_ih_l1" in name:
_a = "weight_ih_l1"
elif "weight_hh_l1" in name:
_a = "weight_hh_l1"
elif "bias_ih_l1" in name:
_a = "bias_ih_l1"
elif "bias_hh_l1" in name:
_a = "bias_hh_l1"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
elif "running_mean" in name:
_a = "running_mean"
elif "running_var" in name:
_a = "running_var"
elif "num_batches_tracked" in name:
_a = "num_batches_tracked"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ):
if config_path is not None:
_a = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
_a = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_a = [8, 5, 4, 4]
_a = [2.2]
_a = 64
_a = 32_000
_a = 2_048
_a = False
_a = False
_a = False
elif model_name == "encodec_48khz":
_a = [8, 5, 4, 2]
_a = [3.0, 6.0, 12.0, 24.0]
_a = 48_000
_a = 2
_a = False
_a = "time_group_norm"
_a = True
_a = 1.0
_a = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_a = EncodecModel(lowerCamelCase__ )
_a = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_a = original_checkpoint["best_state"]
recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__snake_case : List[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
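# Hedged command-line sketch for the converter above; the script file name
# and both paths are placeholders, while the flag names come straight from
# the argparse definitions:
#
#   python convert_encodec_checkpoint.py \
#     --model encodec_24khz \
#     --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#     --pytorch_dump_folder_path ./encodec-24khz-hf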
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
__snake_case : List[str] = list[list[int]]
# assigning initial values to the grid
__snake_case : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__snake_case : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _lowercase ( lowerCamelCase__ : Matrix, lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : int ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _lowercase ( lowerCamelCase__ : Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _lowercase ( lowerCamelCase__ : Matrix ):
if location := find_empty_location(lowerCamelCase__ ):
_a , _a = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1, 10 ):
if is_safe(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
_a = digit
if sudoku(lowerCamelCase__ ) is not None:
return grid
_a = 0
return None
def _lowercase ( lowerCamelCase__ : Matrix ):
for row in grid:
for cell in row:
print(lowerCamelCase__, end=" " )
print()
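# Hedged companion check for the solver above: a completed grid is valid iff
# every row, column, and 3x3 box contains the digits 1-9 exactly once.
def is_complete_and_valid(grid: list[list[int]]) -> bool:
    full = set(range(1, 10))
    rows = all(set(row) == full for row in grid)
    cols = all({grid[r][c] for r in range(9)} == full for c in range(9))
    boxes = all(
        {grid[r + i][c + j] for i in range(3) for j in range(3)} == full
        for r in (0, 3, 6)
        for c in (0, 3, 6)
    )
    return rows and cols and boxes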
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
__snake_case : Dict = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 691 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : str = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__snake_case : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : Optional[Any] ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
else:
_a = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Tuple ):
_a = []
_a = fairseq_model.state_dict()
_a = hf_model.feature_extractor
_a = hf_model.adapter
for name, value in fairseq_dict.items():
_a = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, hf_model.config.feat_extract_norm == "group", )
_a = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
_a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : int, lowerCamelCase__ : str, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Union[str, Any] ):
_a = full_name.split("conv_layers." )[-1]
_a = name.split("." )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_a = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Tuple ):
_a = full_name.split("adaptor." )[-1]
_a = name.split("." )
if items[1].isdigit():
_a = int(items[1] )
else:
_a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_a = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_a = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_a = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_a = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(lowerCamelCase__, lowerCamelCase__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_a = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_a = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Any ):
_a , _a = emb.weight.shape
_a = nn.Linear(lowerCamelCase__, lowerCamelCase__, bias=lowerCamelCase__ )
_a = emb.weight.data
return lin_layer
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : int, lowerCamelCase__ : str, lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : int, lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[Any], ):
_a = WavaVecaConfig.from_pretrained(
lowerCamelCase__, add_adapter=lowerCamelCase__, adapter_stride=lowerCamelCase__, adapter_kernel_size=lowerCamelCase__, use_auth_token=lowerCamelCase__, output_hidden_size=lowerCamelCase__, )
_a = MBartConfig.from_pretrained(lowerCamelCase__ )
# load model
_a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
}, )
_a = model[0].eval()
# load feature extractor
_a = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__, use_auth_token=lowerCamelCase__ )
# set weights for wav2vec2 encoder
_a = WavaVecaModel(lowerCamelCase__ )
recursively_load_weights_wavaveca(model.encoder, lowerCamelCase__ )
# load decoder weights
_a = MBartForCausalLM(lowerCamelCase__ )
_a , _a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=lowerCamelCase__ )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
_a = SpeechEncoderDecoderModel(encoder=lowerCamelCase__, decoder=lowerCamelCase__ )
_a = False
_a = MBartaaTokenizer(lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
_a = hf_wavavec.config.to_dict()
_a = tokenizer.pad_token_id
_a = tokenizer.bos_token_id
_a = tokenizer.eos_token_id
_a = "mbart50"
_a = "wav2vec2"
_a = tokenizer.eos_token_id
_a = 250_004
_a = tokenizer.eos_token_id
_a = SpeechEncoderDecoderConfig.from_dict(lowerCamelCase__ )
hf_wavavec.save_pretrained(lowerCamelCase__ )
feature_extractor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config")
__snake_case : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
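# Hedged command-line sketch for the converter above; every path is a
# placeholder, the remaining flags keep their defaults, and the script file
# name is assumed:
#
#   python convert_wav2vec2_mbart_checkpoint.py \
#     --checkpoint_path ./checkpoint_best.pt \
#     --dict_path ./dict.mbart50.txt \
#     --config_yaml_path ./config.yaml \
#     --pytorch_dump_folder_path ./speech-to-text-hf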
| 691 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=a ):
__UpperCAmelCase : int = ["""torch""", """scipy"""]
    def __init__( self , *snake_case_ , **snake_case_ ) -> None:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__snake_case : Union[str, Any] = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__snake_case : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 691 |
'''simple docstring'''
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case : Union[str, Any] = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
__snake_case : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
__snake_case : Tuple = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_=False ) -> List[Any]:
if return_pvalue:
_a = pearsonr(snake_case_ , snake_case_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(snake_case_ , snake_case_ )[0] )}
| 691 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
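# Stand-alone sketch of the greedy longest-match-first rule that
# WordpieceTokenizer (exercised in the tests above) applies to each word.
# Deliberately simplified: the real tokenizer also caps the word length and
# runs BasicTokenizer first.
def _greedy_wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            sub = ("##" if start > 0 else "") + word[start:end]
            if sub in vocab:
                piece = sub
                break
            end -= 1
        if piece is None:
            return [unk]  # no sub-piece matched: the whole word becomes UNK
        pieces.append(piece)
        start = end
    return pieces

assert _greedy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]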
| 691 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : List[str] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """mgp-str"""
def __init__( self , snake_case_=[3_2, 1_2_8] , snake_case_=4 , snake_case_=3 , snake_case_=2_7 , snake_case_=3_8 , snake_case_=5_0_2_5_7 , snake_case_=3_0_5_2_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=4.0 , snake_case_=True , snake_case_=False , snake_case_=1E-5 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=False , snake_case_=0.02 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
_a = image_size
_a = patch_size
_a = num_channels
_a = max_token_length
_a = num_character_labels
_a = num_bpe_labels
_a = num_wordpiece_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = mlp_ratio
_a = distilled
_a = layer_norm_eps
_a = drop_rate
_a = qkv_bias
_a = attn_drop_rate
_a = drop_path_rate
_a = output_aa_attentions
_a = initializer_range
| 691 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
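# Quick illustrative check of the argmax-accuracy logic above on made-up
# logits (the `evaluate` "accuracy" metric performs the same comparison).
def _demo_accuracy():
    logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
    labels = np.array([1, 0, 0])
    preds = np.argmax(logits, axis=1)  # -> [1, 0, 1]
    return (preds == labels).mean()  # 2 of 3 correct -> ~0.67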
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
_a = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 691 | 1 |
'''simple docstring'''
import os
from math import logaa
def _lowercase ( lowerCamelCase__ : str = "base_exp.txt" ):
_a = 0
_a = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase__ ), lowerCamelCase__ ) ) ):
_a , _a = list(map(lowerCamelCase__, line.split("," ) ) )
if x * logaa(lowerCamelCase__ ) > largest:
_a = x * logaa(lowerCamelCase__ )
_a = i + 1
return result
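# Why the comparison above is done in log space (self-contained check; uses
# math.log10 directly rather than the alias imported at the top): log10 is
# strictly increasing, so comparing y * log10(x) is equivalent to comparing
# x**y without materialising the huge integers.
from math import log10 as _log10

assert (2 * _log10(1_0) < 3 * _log10(5)) == (1_0**2 < 5**3)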
if __name__ == "__main__":
print(solution())
| 691 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
_a = 0
_a = len(lowerCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_a = i + 1
else:
_a = j - 1
return []
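# Working sketch of the two-pointer invariant used above: on a *sorted*
# array, a sum that is too small can only be fixed by moving the left
# pointer right, and one that is too large by moving the right pointer left,
# so each step safely discards one index.
def _two_sum_sorted(nums: list[int], target: int) -> list[int]:
    i, j = 0, len(nums) - 1
    while i < j:
        s = nums[i] + nums[j]
        if s == target:
            return [i, j]
        if s < target:
            i += 1
        else:
            j -= 1
    return []

assert _two_sum_sorted([2, 7, 1_1, 1_5], 9) == [0, 1]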
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 691 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 691 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A ( a ):
__UpperCAmelCase : List[str] = """naver-clova-ix/donut-base-finetuned-docvqa"""
__UpperCAmelCase : Tuple = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
__UpperCAmelCase : Optional[Any] = """document_qa"""
__UpperCAmelCase : str = AutoProcessor
__UpperCAmelCase : Any = VisionEncoderDecoderModel
__UpperCAmelCase : List[str] = ["""image""", """text"""]
__UpperCAmelCase : Optional[int] = ["""text"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
_a = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
_a = task_prompt.replace("{user_input}" , snake_case_ )
_a = self.pre_processor.tokenizer(
snake_case_ , add_special_tokens=snake_case_ , return_tensors="pt" ).input_ids
_a = self.pre_processor(snake_case_ , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __lowerCAmelCase ( self , snake_case_ ) -> Dict:
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=snake_case_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=snake_case_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=snake_case_ , ).sequences
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
_a = self.pre_processor.batch_decode(snake_case_ )[0]
_a = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
_a = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
_a = re.sub(R"<.*?>" , "" , snake_case_ , count=1 ).strip() # remove first task start token
_a = self.pre_processor.tokenajson(snake_case_ )
return sequence["answer"]
| 691 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
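# Pure-Python sketch of what shift_tokens_right does to build decoder inputs
# from labels, as asserted in the batch test above (the real helper also maps
# -100 padding back to pad_token_id).
def _shift_right(ids, decoder_start):
    return [decoder_start] + ids[:-1]

assert _shift_right([1_2_8_0_2_8, 5, 6, 2], 2) == [2, 1_2_8_0_2_8, 5, 6]  # [FR_CODE, ..., eos]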
| 691 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list ):
if len(lowerCamelCase__ ) < 2:
return collection
def circle_sort_util(lowerCamelCase__ : list, lowerCamelCase__ : int, lowerCamelCase__ : int ) -> bool:
_a = False
if low == high:
return swapped
_a = low
_a = high
while left < right:
if collection[left] > collection[right]:
_a , _a = (
collection[right],
collection[left],
)
_a = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
_a , _a = (
collection[right + 1],
collection[left],
)
_a = True
_a = low + int((high - low) / 2 )
_a = circle_sort_util(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
_a = circle_sort_util(lowerCamelCase__, mid + 1, lowerCamelCase__ )
return swapped or left_swap or right_swap
_a = True
while is_not_sorted is True:
_a = circle_sort_util(lowerCamelCase__, 0, len(lowerCamelCase__ ) - 1 )
return collection
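# Illustrative helper showing one outer compare/swap sweep of circle sort
# (the inner `circle_sort_util` above recurses on halves and repeats such
# sweeps until a full pass performs no swap).
def _one_circle_pass(items: list) -> bool:
    swapped = False
    left, right = 0, len(items) - 1
    while left < right:
        if items[left] > items[right]:
            items[left], items[right] = items[right], items[left]
            swapped = True
        left, right = left + 1, right - 1
    return swapped

_demo = [3, 1, 2]
assert _one_circle_pass(_demo) and _demo == [2, 1, 3]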
if __name__ == "__main__":
__snake_case : List[Any] = input("Enter numbers separated by a comma:\n").strip()
__snake_case : List[Any] = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 691 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
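# What the property above evaluates to for the default strides (illustrative):
# 5 * 2**6 = 320 input samples per extracted feature frame, i.e. one frame
# every ~20 ms of 16 kHz audio.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 3_2_0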
| 691 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger(__name__)
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Optional[int]=False ):
_a = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_a = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Optional[Any], lowerCamelCase__ : int=False ):
for i in range(config.num_hidden_layers ):
if base_model:
_a = ""
else:
_a = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_a = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_a = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[
: config.hidden_size, :
]
_a = in_proj_bias[: config.hidden_size]
_a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_a = in_proj_weight[
-config.hidden_size :, :
]
_a = in_proj_bias[-config.hidden_size :]
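# Shape sketch of the fused-QKV split performed in the function above
# (illustrative): timm stores a single (3*hidden, hidden) projection, while
# the HF checkpoint expects separate (hidden, hidden) q/k/v matrices.
_h = 4
_qkv = torch.arange(3 * _h * _h, dtype=torch.float32).reshape(3 * _h, _h)
assert _qkv[:_h].shape == _qkv[_h : 2 * _h].shape == _qkv[-_h:].shape == (_h, _h)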
def _lowercase ( lowerCamelCase__ : Dict ):
_a = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__, lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : str, lowerCamelCase__ : Union[str, Any] ):
_a = dct.pop(lowerCamelCase__ )
_a = val
def _lowercase ( ):
_a = "http://images.cocodataset.org/val2017/000000039769.jpg"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : List[Any], lowerCamelCase__ : Optional[Any]=False ):
_a = BitConfig(
global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=lowerCamelCase__, )
_a = ViTHybridConfig(backbone_config=lowerCamelCase__, image_size=384, num_labels=1_000 )
_a = False
# load original model from timm
_a = timm.create_model(lowerCamelCase__, pretrained=lowerCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_a = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCamelCase__ )
_a = create_rename_keys(lowerCamelCase__, lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
_a = "huggingface/label-files"
_a = "imagenet-1k-id2label.json"
_a = json.load(open(hf_hub_download(lowerCamelCase__, lowerCamelCase__, repo_type="dataset" ), "r" ) )
_a = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_a = ViTHybridModel(lowerCamelCase__ ).eval()
else:
_a = ViTHybridForImageClassification(lowerCamelCase__ ).eval()
model.load_state_dict(lowerCamelCase__ )
# create image processor
_a = create_transform(**resolve_data_config({}, model=lowerCamelCase__ ) )
_a = transform.transforms
_a = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
_a = ViTHybridImageProcessor(
do_resize=lowerCamelCase__, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=lowerCamelCase__, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=lowerCamelCase__, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
_a = prepare_img()
_a = transform(lowerCamelCase__ ).unsqueeze(0 )
_a = processor(lowerCamelCase__, return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCamelCase__, lowerCamelCase__ )
# verify logits
with torch.no_grad():
_a = model(lowerCamelCase__ )
_a = outputs.logits
print("Predicted class:", logits.argmax(-1 ).item() )
if base_model:
_a = timm_model.forward_features(lowerCamelCase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCamelCase__, outputs.pooler_output, atol=1e-3 )
else:
_a = timm_model(lowerCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase__, outputs.logits, atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase__ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
__snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
__snake_case : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 691 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number | (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return int((number & (1 << position)) != 0 )
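# Worked example of the masks the helpers above build (0b1010 is decimal 10):
# set bit 0, clear bit 3, flip bit 1, then test bit 2.
assert (0b1010 | (1 << 0)) == 0b1011
assert (0b1010 & ~(1 << 3)) == 0b0010
assert (0b1010 ^ (1 << 1)) == 0b1000
assert ((0b1010 >> 2) & 1) == 0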
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( a , unittest.TestCase ):
__UpperCAmelCase : int = KandinskyImgaImgPipeline
__UpperCAmelCase : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
__UpperCAmelCase : str = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
__UpperCAmelCase : Any = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__UpperCAmelCase : Optional[int] = False
@property
def __lowerCAmelCase ( self ) -> Dict:
return 3_2
@property
def __lowerCAmelCase ( self ) -> int:
return 3_2
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
return 1_0_0
@property
def __lowerCAmelCase ( self ) -> Tuple:
_a = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __lowerCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_a = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a = MultilingualCLIP(snake_case_ )
_a = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
_a = {
"in_channels": 4,
# Out channels is double the in channels because the model predicts both the mean and the variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_a = UNetaDConditionModel(**snake_case_ )
return model
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_a = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_unet
_a = self.dummy_movq
_a = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_a = DDIMScheduler(**snake_case_ )
_a = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Tuple:
_a = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case_ )
# create init_image
_a = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
if str(snake_case_ ).startswith("mps" ):
_a = torch.manual_seed(snake_case_ )
else:
_a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_a = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = "cpu"
_a = self.get_dummy_components()
_a = self.pipeline_class(**snake_case_ )
_a = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_a = pipe(**self.get_dummy_inputs(snake_case_ ) )
_a = output.images
_a = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
_a = "A red cartoon frog, 4k"
_a = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case_ )
_a = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
_a = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
_a = torch.Generator(device="cpu" ).manual_seed(0 )
_a , _a = pipe_prior(
snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
_a = pipeline(
snake_case_ , image=snake_case_ , image_embeds=snake_case_ , negative_image_embeds=snake_case_ , generator=snake_case_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , )
_a = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 691 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__snake_case : List[Any] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ):
_a = True
while ask_again:
_a = input(lowerCamelCase__ )
try:
if default is not None and len(lowerCamelCase__ ) == 0:
return default
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ):
_a = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
_a = menu.run(default_choice=lowerCamelCase__ )
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _lowercase ( lowerCamelCase__ : Dict ):
_a = int(lowerCamelCase__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = int(lowerCamelCase__ )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _lowercase ( lowerCamelCase__ : str ):
return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_a = usage.replace("<command> [<args>] " , "" )
return usage
| 691 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__snake_case : Optional[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[list] ):
_a = current_set.copy()
for row_index, row in enumerate(lowerCamelCase__ ):
_a = row[0]
for column_index, column in enumerate(lowerCamelCase__ ):
if magnitude == 0:
_a = column
continue
_a = column / magnitude
# Subtract to cancel term
_a = current_set[0]
_a = [first_row]
_a = current_set[1::]
for row in current_set:
_a = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCamelCase__ )
continue
for column_index in range(len(lowerCamelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a = final_set[0]
_a = []
_a = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a = simplify(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, lowerCamelCase__ )
_a = resultant
return final_set
def _lowercase ( lowerCamelCase__ : list[list] ):
if len(lowerCamelCase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
_a = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
_a = equations.copy()
if any(0 in row for row in data_set ):
_a = data_set.copy()
_a = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
_a = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, lowerCamelCase__ )
_a = data_set.copy()
_a = simplify(lowerCamelCase__ )
_a = simplified[::-1]
_a = []
for row in simplified:
_a = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
_a = temp_row[1::]
_a = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
_a = []
for item in solutions:
final.append(float(round(lowerCamelCase__, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
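    # Hand check (illustrative note, not from the original script): every row of
    # `eq` reads S + x_i = rhs_i with S = x_0 + ... + x_4. Summing all five rows
    # gives 6 * S = 30, so S = 5 and the expected solution is
    # [-1.0, 0.0, 1.0, 2.0, 3.0]. The second call solves the single equation
    # 4 * x = 2 and should print [0.5].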
| 691 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
__snake_case : Tuple = logging.getLogger(__name__)
__snake_case : List[str] = {"facebook/bart-base": BartForConditionalGeneration}
__snake_case : List[Any] = {"facebook/bart-base": BartTokenizer}
def _lowercase ( ):
_a = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file", type=lowerCamelCase__, default=lowerCamelCase__, help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length", type=lowerCamelCase__, default=5, help="The maximum total input sequence length after tokenization.", )
parser.add_argument(
"--num_beams", type=lowerCamelCase__, default=lowerCamelCase__, help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
), )
parser.add_argument(
"--model_name_or_path", type=lowerCamelCase__, help="Path to pretrained model or model identifier from huggingface.co/models.", required=lowerCamelCase__, )
parser.add_argument(
"--config_name", type=lowerCamelCase__, default=lowerCamelCase__, help="Pretrained config name or path if not the same as model_name", )
parser.add_argument(
"--device", type=lowerCamelCase__, default="cpu", help="Device where the model will be run", )
parser.add_argument("--output_file_path", type=lowerCamelCase__, default=lowerCamelCase__, help="Where to store the final ONNX file." )
_a = parser.parse_args()
return args
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Optional[Any]="cpu" ):
_a = model_dict[model_name].from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
_a = tokenizer_dict[model_name].from_pretrained(lowerCamelCase__ )
if model_name in ["facebook/bart-base"]:
_a = 0
_a = None
_a = 0
return huggingface_model, tokenizer
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Optional[int], lowerCamelCase__ : Any, lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any] ):
model.eval()
_a = None
_a = torch.jit.script(BARTBeamSearchGenerator(lowerCamelCase__ ) )
with torch.no_grad():
_a = "My friends are cool but they eat too many carbs."
_a = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors="pt" ).to(model.device )
_a = model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=lowerCamelCase__, max_length=lowerCamelCase__, early_stopping=lowerCamelCase__, decoder_start_token_id=model.config.decoder_start_token_id, )
torch.onnx.export(
lowerCamelCase__, (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
), lowerCamelCase__, opset_version=14, input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"], output_names=["output_ids"], dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
}, example_outputs=lowerCamelCase__, )
logger.info("Model exported to {}".format(lowerCamelCase__ ) )
_a = remove_dup_initializers(os.path.abspath(lowerCamelCase__ ) )
logger.info("Deduplicated and optimized model written to {}".format(lowerCamelCase__ ) )
_a = onnxruntime.InferenceSession(lowerCamelCase__ )
_a = ort_sess.run(
lowerCamelCase__, {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(lowerCamelCase__ ),
"max_length": np.array(lowerCamelCase__ ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
}, )
np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def _lowercase ( ):
_a = parse_args()
_a = 5
_a = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_a = torch.device(args.device )
_a , _a = load_model_tokenizer(args.model_name_or_path, lowerCamelCase__ )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(lowerCamelCase__ )
if args.max_length:
_a = args.max_length
if args.num_beams:
_a = args.num_beams
if args.output_file_path:
_a = args.output_file_path
else:
_a = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
if __name__ == "__main__":
main()
| 691 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing
return x.sum()
def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing
return i + 1
@dataclass
class A :
__UpperCAmelCase : int
__UpperCAmelCase : str
class A ( a ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
            map_nested(lambda snake_case_ : snake_case_ + 1 , snake_case_ , num_proc=snake_case_ )

def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
class A :
__UpperCAmelCase : Optional[int] = """bar"""
_a = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ):
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
_a = {F'''{i}''': i for i in range(lowerCamelCase__ )}
        _a = map_nested(lambda lowerCamelCase__ : lowerCamelCase__ + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A ( a ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( lowerCamelCase__ : Any ):
_a = NestedDataStructure(lowerCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ):
_a = NestedDataStructure(lowerCamelCase__ ).flatten()
assert output == expected_output
def _lowercase ( ):
_a = A(x=1, y="foobar" )
_a = {"x": 1, "y": "foobar"}
assert asdict(lowerCamelCase__ ) == expected_output
_a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
_a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(lowerCamelCase__ ) == expected_output
with pytest.raises(lowerCamelCase__ ):
asdict([1, A(x=10, y="foo" )] )
def _lowercase ( lowerCamelCase__ : str ):
return text.split()
def _lowercase ( lowerCamelCase__ : List[Any] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowercase ( ):
with Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_a = []
for yield_time, content in iflatmap_unordered(
lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(lowerCamelCase__ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(lowerCamelCase__ ) == 4
| 691 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class A ( a ):
__UpperCAmelCase : bool = field(default=a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
__UpperCAmelCase : bool = field(
default=a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
__UpperCAmelCase : Optional[int] = field(
default=a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
__UpperCAmelCase : Optional[int] = field(
default=a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
__UpperCAmelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = super().to_dict()
for k, v in d.items():
if isinstance(snake_case_ , snake_case_ ):
_a = v.to_dict()
return d
| 691 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a )
class A ( a ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
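# Minimal construction sketch (an assumption inferred from how the constructor
# above consumes its kwargs, not code from the original file): the class, named
# RagConfig in upstream transformers but renamed `A` here, must receive nested
# `question_encoder` and `generator` config dicts, e.g.
#
#   from transformers import AutoConfig, RagConfig
#   qe_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   gen_cfg = AutoConfig.from_pretrained("facebook/bart-large")
#   cfg = RagConfig(question_encoder=qe_cfg.to_dict(), generator=gen_cfg.to_dict(), n_docs=5)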
| 691 | 1 |
'''simple docstring'''
import math
class A :
    def __init__( self , snake_case_=0 ) -> Tuple: # a graph with nodes 0, 1, ..., n-1
_a = n
_a = [
[math.inf for j in range(0 , snake_case_ )] for i in range(0 , snake_case_ )
] # adjacency matrix for weight
_a = [
[math.inf for j in range(0 , snake_case_ )] for i in range(0 , snake_case_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Any:
_a = w
def __lowerCAmelCase ( self ) -> List[Any]:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_a = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
return self.dp[u][v]
if __name__ == "__main__":
__snake_case : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
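    # Expected results for the weights above (illustrative note, not original
    # output): the shortest path 1 -> 3 -> 4 has length 5 + 6 = 11, and the
    # shortest path 0 -> 2 -> 3 has length 9 + 7 = 16, so the two calls return
    # 11 and 16. Note that show_min only returns dp[u][v]; wrap the calls in
    # print() to see the values.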
| 691 |
'''simple docstring'''
class A :
def __init__( self ) -> List[str]:
_a = 0
_a = 0
_a = {}
def __lowerCAmelCase ( self , snake_case_ ) -> int:
if vertex not in self.adjacency:
_a = {}
self.num_vertices += 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
_a = weight
_a = weight
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
_a = list(edges[i] )
        edges.sort(key=lambda snake_case_ : snake_case_[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a = edges[i][2] + 1
for edge in edges:
_a , _a , _a = edge
_a = weight
_a = weight
def __str__( self ) -> Optional[int]:
_a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCAmelCase ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any:
_a = Graph()
if vertices is None:
_a = []
if edges is None:
_a = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class A :
def __init__( self ) -> Optional[int]:
_a = {}
_a = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
if item in self.parent:
return self.find(snake_case_ )
_a = item
_a = 0
return item
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
_a = self.find(self.parent[item] )
return self.parent[item]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = self.find(snake_case_ )
_a = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a = roota
return roota
return None
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = graph.num_vertices
_a = Graph.UnionFind()
_a = []
while num_components > 1:
_a = {}
for vertex in graph.get_vertices():
_a = -1
_a = graph.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a = edge
_a = union_find.find(snake_case_ )
_a = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
_a = num_components - 1
_a = Graph.build(edges=snake_case_ )
return mst
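# Usage sketch (illustrative; every method name in this dump is obfuscated, so
# the calls below use the upstream TheAlgorithms names as an assumption):
#
#   g = Graph.build(edges=[(0, 1, 1), (1, 2, 2), (2, 0, 3)])
#   mst = Graph.boruvka_mst(g)   # assumed original name of the static method above
#   print(mst)                   # expected MST edges: 0-1 (weight 1), 1-2 (weight 2)
#
# The static method repeatedly picks the cheapest outgoing edge of each
# component and unions components until one remains, the classic Boruvka step.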
| 691 | 1 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _lowercase ( lowerCamelCase__ : int ):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _lowercase ( ):
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _lowercase ( ):
_a = "mock-s3-bucket"
_a = F'''s3://{mock_bucket}'''
_a = extract_path_from_uri(lowerCamelCase__ )
assert dataset_path.startswith("s3://" ) is False
_a = "./local/path"
_a = extract_path_from_uri(lowerCamelCase__ )
assert dataset_path == new_dataset_path
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = is_remote_filesystem(lowerCamelCase__ )
assert is_remote is True
_a = fsspec.filesystem("file" )
_a = is_remote_filesystem(lowerCamelCase__ )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict, lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : List[Any], lowerCamelCase__ : Optional[int] ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
_a = input_paths[compression_fs_class.protocol]
if input_path is None:
_a = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase__ )
_a = fsspec.filesystem(compression_fs_class.protocol, fo=lowerCamelCase__ )
assert isinstance(lowerCamelCase__, lowerCamelCase__ )
_a = os.path.basename(lowerCamelCase__ )
_a = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(lowerCamelCase__, "r", encoding="utf-8" ) as f, open(lowerCamelCase__, encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"] )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : int, lowerCamelCase__ : str ):
_a = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
_a = compressed_file_paths[protocol]
_a = "dataset.jsonl"
_a = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
_a , *_a = fsspec.get_fs_token_paths(lowerCamelCase__ )
assert fs.isfile(lowerCamelCase__ )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[Any] ):
_a = hf_api.dataset_info(lowerCamelCase__, token=lowerCamelCase__ )
_a = HfFileSystem(repo_info=lowerCamelCase__, token=lowerCamelCase__ )
assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
assert hffs.isdir("data" )
assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
with open(lowerCamelCase__ ) as f:
assert hffs.open("data/text_data.txt", "r" ).read() == f.read()
def _lowercase ( ):
_a = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(lowerCamelCase__, lowerCamelCase__, clobber=lowerCamelCase__ )
with pytest.warns(lowerCamelCase__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(lowerCamelCase__ ) == 1
assert (
str(warning_info[0].message )
== F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 691 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_a = bytes(lowerCamelCase__, "utf-8" )
with zstd.open(lowerCamelCase__, "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture
def _lowercase ( lowerCamelCase__ : int ):
with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f:
f.write(lowerCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int = 3, lowerCamelCase__ : int = 7, lowerCamelCase__ : int = 1_000_000 ):
_a = 0
_a = 1
for current_denominator in range(1, limit + 1 ):
_a = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_a = current_numerator
_a = current_denominator
return max_numerator
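# Why this works (illustrative note): for each denominator d the largest
# numerator n with n/d < 3/7 is floor(3 * d / 7), lowered by one exactly when d
# is a multiple of 7 (there 3 * d / 7 equals 3/7 itself, which is not strictly
# smaller). Candidates are compared via the cross-multiplication
# n * best_d > d * best_n, which avoids floating point. With the default
# arguments this returns 428570, the numerator of the fraction immediately to
# the left of 3/7 among fractions with denominator <= 1,000,000.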
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
| 691 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = {}
state_dict.pop("pixel_mean", lowerCamelCase__ )
state_dict.pop("pixel_std", lowerCamelCase__ )
_a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a = key.replace(lowerCamelCase__, lowerCamelCase__ )
if re.match(lowerCamelCase__, lowerCamelCase__ ):
_a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
_a = key.replace("layers.0", "proj_in" )
elif layer_nb == 1:
_a = key.replace("layers.1", "layers.0" )
elif layer_nb == 2:
_a = key.replace("layers.2", "proj_out" )
_a = value
_a = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ):
_a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_a = SamConfig()
elif "sam_vit_l" in model_name:
_a = SamVisionConfig(
hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
elif "sam_vit_h" in model_name:
_a = SamVisionConfig(
hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
_a = torch.load(lowerCamelCase__, map_location="cpu" )
_a = replace_keys(lowerCamelCase__ )
_a = SamImageProcessor()
_a = SamProcessor(image_processor=lowerCamelCase__ )
_a = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
_a = hf_model.to("cuda" )
_a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" )
_a = [[[400, 650]]]
_a = [[1]]
_a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_a = ((75, 275, 1_725, 850),)
_a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_a = [[[400, 650], [800, 650]]]
_a = [[1, 1]]
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
__snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__snake_case : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=2 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=3 , snake_case_=None , snake_case_=2 , ) -> List[str]:
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
_a = scope
_a = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_a = (image_size // patch_size) ** 2
_a = num_patches + 2
def __lowerCAmelCase ( self ) -> List[Any]:
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Tuple:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
_a = TFDeiTModel(config=snake_case_ )
_a = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
_a = TFDeiTForMaskedImageModeling(config=snake_case_ )
_a = model(snake_case_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_a = 1
_a = TFDeiTForMaskedImageModeling(snake_case_ )
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(snake_case_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
_a = self.type_sequence_label_size
_a = TFDeiTForImageClassification(snake_case_ )
_a = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a = 1
_a = TFDeiTForImageClassification(snake_case_ )
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : Dict = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = TFDeiTModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __lowerCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , tf.keras.layers.Dense ) )
def __lowerCAmelCase ( self ) -> Dict:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ )
_a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_=False ) -> Optional[int]:
_a = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __lowerCAmelCase ( self ) -> Tuple:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = TFDeiTModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _lowercase ( ):
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> Any:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=snake_case_ , return_tensors="tf" )
# forward pass
_a = model(**snake_case_ )
# verify the logits
_a = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_a = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 691 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : List[Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_a = []
for i in range(lowerCamelCase__ ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) )
return torch.tensor(lowerCamelCase__, dtype=torch.floataa )
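# Added commentary (not part of the original module): the helper above derives
# discrete betas from a continuous alpha-bar curve via
# beta_t = 1 - alpha_bar((t + 1) / T) / alpha_bar(t / T), clipped at max_beta
# (0.999 by default) so no single step removes too much signal. With the
# default "cosine" transform this reproduces the schedule from Nichol &
# Dhariwal (2021), "Improved Denoising Diffusion Probabilistic Models".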
class A ( a , a ):
__UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Optional[int] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]:
if trained_betas is not None:
_a = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(snake_case_ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict:
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(snake_case_ ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.index_for_timestep(snake_case_ )
if self.state_in_first_order:
_a = self.sigmas[step_index]
else:
_a = self.sigmas_interpol[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
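    # Added note: dividing by (sigma**2 + 1) ** 0.5 above is the standard
    # Karras-style input preconditioning: a noisy sample x0 + sigma * n has
    # variance sigma**2 + 1 for unit-variance data, so the scaling keeps the
    # model input at roughly unit variance across all noise levels.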
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]:
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ )
_a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
# interpolate sigmas
_a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case_ ).startswith("mps" ):
# mps does not support float64
_a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa )
else:
_a = torch.from_numpy(snake_case_ ).to(snake_case_ )
# interpolate timesteps
_a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype )
_a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_a = torch.cat([timesteps[:1], interleaved_timesteps] )
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
# get log sigma
_a = sigma.log()
# get distribution
_a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = self.log_sigmas[low_idx]
_a = self.log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = w.clamp(0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.view(sigma.shape )
return t
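    # Added note: `sigma_to_t` above inverts the sigma schedule -- it brackets
    # each log-sigma between two tabulated log-sigmas and linearly interpolates
    # a fractional timestep index, so the interpolated sigmas map back to
    # meaningful (possibly non-integer) timesteps for the model.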
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return self.sample is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(snake_case_ )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas_interpol[step_index + 1]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas_interpol[step_index]
_a = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
        # Gamma could be supported in the future, but that would require scaling the
        # timestep before passing it to the model, which means a change in the API.
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_interpol - sigma_hat
# store for 2nd order step
_a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_a = sigma_next - sigma_hat
_a = self.sample
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
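    # Added note: `step` above implements the two-stage KDPM2 update. The first
    # call per timestep evaluates the ODE derivative at sigma_hat and caches the
    # sample; the second call re-evaluates at the interpolated midpoint sigma
    # and takes the full step from sigma_hat to sigma_next -- a Heun-style
    # second-order sampler in the spirit of Karras et al. (2022).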
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
_a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_a = self.timesteps.to(original_samples.device )
_a = timesteps.to(original_samples.device )
_a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
_a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a = sigma.unsqueeze(-1 )
_a = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> str:
return self.config.num_train_timesteps
| 691 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A ( a ):
__UpperCAmelCase : int = """Wav2Vec2FeatureExtractor"""
__UpperCAmelCase : str = """AutoTokenizer"""
def __init__( self , snake_case_ , snake_case_ ) -> Union[str, Any]:
super().__init__(snake_case_ , snake_case_ )
_a = self.feature_extractor
_a = False
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , **snake_case_ ) -> List[str]:
try:
return super().from_pretrained(snake_case_ , **snake_case_ )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case_ , )
_a = WavaVecaFeatureExtractor.from_pretrained(snake_case_ , **snake_case_ )
_a = WavaVecaCTCTokenizer.from_pretrained(snake_case_ , **snake_case_ )
return cls(feature_extractor=snake_case_ , tokenizer=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ) -> List[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*snake_case_ , **snake_case_ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
_a = kwargs.pop("raw_speech" )
else:
_a = kwargs.pop("audio" , snake_case_ )
_a = kwargs.pop("sampling_rate" , snake_case_ )
_a = kwargs.pop("text" , snake_case_ )
if len(snake_case_ ) > 0:
_a = args[0]
_a = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
_a = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ )
if text is not None:
_a = self.tokenizer(snake_case_ , **snake_case_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_a = encodings["input_ids"]
return inputs
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> List[str]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case_ , **snake_case_ )
_a = kwargs.pop("input_features" , snake_case_ )
_a = kwargs.pop("labels" , snake_case_ )
if len(snake_case_ ) > 0:
_a = args[0]
_a = args[1:]
if input_features is not None:
_a = self.feature_extractor.pad(snake_case_ , *snake_case_ , **snake_case_ )
if labels is not None:
_a = self.tokenizer.pad(snake_case_ , **snake_case_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_a = labels["input_ids"]
return input_features
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> str:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> Tuple:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@contextmanager
def __lowerCAmelCase ( self ) -> Dict:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
_a = True
_a = self.tokenizer
yield
_a = self.feature_extractor
_a = False
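# Usage sketch (added commentary; names follow the public transformers API,
# e.g. Wav2Vec2Processor, rather than this file's renamed classes):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, text="HELLO WORLD")
#   # -> feature-extractor inputs plus a "labels" field holding the text's token ids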
| 691 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
# Color current vertex
_a = i
# Validate coloring
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
return True
# Backtrack
_a = -1
return False
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
_a = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
return colored_vertices
return []
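# Usage sketch (added commentary; in the unobfuscated original these functions
# are `valid_coloring`, `util_color` and `color`): for the triangle graph
# [[0, 1, 1], [1, 0, 1], [1, 1, 0]], color(graph, 3) backtracks to a proper
# 3-coloring such as [0, 1, 2], while color(graph, 2) finds none and returns [].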
| 691 | 1 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
_a = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
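    # Added note: the custom callback registered above re-runs evaluation on the
    # training split after each epoch, so train-set accuracy is logged under the
    # "train" metric prefix alongside the regular validation metrics.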
if __name__ == "__main__":
main()
| 691 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A :
def __init__( self , snake_case_ ) -> Optional[int]:
_a = str(id_ )
_a = None
_a = None
_a = []
_a = {} # {vertex:distance}
def __lt__( self , snake_case_ ) -> Optional[Any]:
return self.key < other.key
def __repr__( self ) -> Union[str, Any]:
return self.id
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
self.neighbors.append(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
_a = weight
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
_a = []
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = graph[:]
while q:
_a = min(lowerCamelCase__ )
q.remove(lowerCamelCase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
for i in range(1, len(lowerCamelCase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = list(lowerCamelCase__ )
hq.heapify(lowerCamelCase__ )
while h:
_a = hq.heappop(lowerCamelCase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
hq.heapify(lowerCamelCase__ )
for i in range(1, len(lowerCamelCase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowercase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
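# Added note: the two traversals above are Prim's algorithm -- first the
# O(V^2) list-scan form, then a heap-based generator variant; both emit the
# MST as (vertex_id, parent_id) pairs. The heap variant re-heapifies the whole
# heap after each key decrease, an O(V) operation, so it does not reach the
# textbook O(E log V); a decrease-key or lazy-deletion heap would fix that.
# Usage sketch (names per the unobfuscated original): create Vertex objects,
# call connect(graph, a, b, weight) per edge, then prims(graph, graph[0]).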
| 691 | 1 |
'''simple docstring'''
__snake_case : Dict = range(2, 20 + 1)
__snake_case : Any = [10**k for k in range(ks[-1] + 1)]
__snake_case : dict[int, dict[int, list[list[int]]]] = {}
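# Added commentary (not part of the original solution): this implements the
# "digit sum sequence" a(1) = 1, a(n+1) = a(n) + digitsum(a(n)), evaluated at
# n = 10**15 -- a Project Euler-style problem. Digits live little-endian in
# `a_i`; `memo` caches, per digit sum of the high part and per low-part value
# c, how far the sequence can jump at once, so the answer is reached without
# iterating all 10**15 terms one by one.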
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Optional[Any], lowerCamelCase__ : str ):
_a = sum(a_i[j] for j in range(lowerCamelCase__, len(lowerCamelCase__ ) ) )
_a = sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase__ ), lowerCamelCase__ ) ) )
_a , _a = 0, 0
_a = n - i
_a = memo.get(lowerCamelCase__ )
if sub_memo is not None:
_a = sub_memo.get(lowerCamelCase__ )
if jumps is not None and len(lowerCamelCase__ ) > 0:
# find and make the largest jump without going over
_a = -1
for _k in range(len(lowerCamelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_a = _k
break
if max_jump >= 0:
_a , _a , _a = jumps[max_jump]
# since the difference between jumps is cached, add c
_a = diff + c
for j in range(min(lowerCamelCase__, len(lowerCamelCase__ ) ) ):
_a , _a = divmod(lowerCamelCase__, 10 )
if new_c > 0:
add(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
else:
_a = []
else:
_a = {c: []}
_a = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_a , _a = next_term(lowerCamelCase__, k - 1, i + dn, lowerCamelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_a , _a = compute(lowerCamelCase__, lowerCamelCase__, i + dn, lowerCamelCase__ )
diff += _diff
dn += terms_jumped
_a = sub_memo[c]
# keep jumps sorted by # of terms skipped
_a = 0
while j < len(lowerCamelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase__, (diff, dn, k) )
return (diff, dn)
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int] ):
if i >= n:
return 0, i
if k > len(lowerCamelCase__ ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_a = i
_a , _a , _a = 0, 0, 0
for j in range(len(lowerCamelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_a = ds_c + ds_b
diff += addend
_a = 0
for j in range(lowerCamelCase__ ):
_a = a_i[j] + addend
_a , _a = divmod(lowerCamelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
return diff, i - start_i
def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str] ):
for j in range(lowerCamelCase__, len(lowerCamelCase__ ) ):
_a = digits[j] + addend
if s >= 10:
_a , _a = divmod(lowerCamelCase__, 10 )
_a = addend // 10 + quotient
else:
_a = s
_a = addend // 10
if addend == 0:
break
while addend > 0:
_a , _a = divmod(lowerCamelCase__, 10 )
digits.append(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : int = 10**15 ):
_a = [1]
_a = 1
_a = 0
while True:
_a , _a = next_term(lowerCamelCase__, 20, i + dn, lowerCamelCase__ )
dn += terms_jumped
if dn == n - i:
break
_a = 0
for j in range(len(lowerCamelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 691 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class A :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str: # noqa: B008
_a = multiplier
_a = increment
_a = modulo
_a = seed
def __lowerCAmelCase ( self ) -> str:
_a = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
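# Added note: this is a textbook linear congruential generator,
# seed_{n+1} = (multiplier * seed_n + increment) mod modulo. The demo below
# uses multiplier 1664525 and increment 1013904223 (the "Numerical Recipes"
# constants) with modulus 2 << 31, i.e. 2**32.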
if __name__ == "__main__":
# Show the LCG in action.
__snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 691 | 1 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class A ( a ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaises(snake_case_ ):
_a = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __lowerCAmelCase ( self ) -> List[Any]:
with self.assertRaises(snake_case_ ):
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def __lowerCAmelCase ( self ) -> Any:
_a = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self ) -> List[str]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def __lowerCAmelCase ( self ) -> Any:
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self ) -> Any:
_a = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __lowerCAmelCase ( self ) -> Any:
_a = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import PIL.Image
_a = PIL.Image.fromarray(np.arange(1_0 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=snake_case_ ) as mock_cast_to_python_objects:
_a = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_a , _a = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , snake_case_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : int ):
_a = pa.BufferReader(lowerCamelCase__ ) if isinstance(lowerCamelCase__, pa.Buffer ) else pa.memory_map(lowerCamelCase__ )
_a = pa.ipc.open_stream(lowerCamelCase__ )
_a = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
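# Added note: `_check_output` round-trips the serialized bytes through the
# Arrow IPC stream reader and asserts both the chunking (one record batch per
# example when writer_batch_size == 1, otherwise a single batch) and the
# recovered payload.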
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : Dict ):
_a = pa.BufferOutputStream()
_a = pa.schema(lowerCamelCase__ ) if fields else None
with ArrowWriter(stream=lowerCamelCase__, schema=lowerCamelCase__, writer_batch_size=lowerCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowercase ( ):
_a = pa.BufferOutputStream()
_a = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=lowerCamelCase__, features=lowerCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_a = pa.BufferReader(output.getvalue() )
_a = pa.ipc.open_stream(lowerCamelCase__ )
_a = f.read_all()
_a = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCamelCase__ )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
def _lowercase ( lowerCamelCase__ : Optional[int] ):
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase__, writer_batch_size=lowerCamelCase__, hash_salt="split_name", check_duplicates=lowerCamelCase__, ) as writer:
with pytest.raises(lowerCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2] )
_a , _a = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10] )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase__, writer_batch_size=lowerCamelCase__, hash_salt="split_name", check_duplicates=lowerCamelCase__, ) as writer:
with pytest.raises(lowerCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1}, key=10 )
writer.write({"col_1": "bar", "col_2": 2}, key=10 )
_a , _a = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10] )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase__, writer_batch_size=lowerCamelCase__, hash_salt="split_name", check_duplicates=lowerCamelCase__, ) as writer:
writer.write({"col_1": "foo", "col_2": 1}, key=1 )
writer.write({"col_1": "bar", "col_2": 2}, key=2 )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Optional[Any] ):
_a = pa.BufferOutputStream()
_a = pa.schema(lowerCamelCase__ ) if fields else None
with ArrowWriter(stream=lowerCamelCase__, schema=lowerCamelCase__, writer_batch_size=lowerCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : Any ):
_a = pa.BufferOutputStream()
_a = pa.schema(lowerCamelCase__ ) if fields else None
with ArrowWriter(stream=lowerCamelCase__, schema=lowerCamelCase__, writer_batch_size=lowerCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = pa.BufferOutputStream()
_a = pa.schema(lowerCamelCase__ ) if fields else None
with ArrowWriter(stream=lowerCamelCase__, schema=lowerCamelCase__, writer_batch_size=lowerCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowercase ( ):
with tempfile.TemporaryDirectory() as tmp_dir:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
_a = os.path.join(lowerCamelCase__, "test.arrow" )
with ArrowWriter(path=lowerCamelCase__, schema=pa.schema(lowerCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(lowerCamelCase__, 1 )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
if pa.types.is_list(lowerCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
if isinstance(lst[0], lowerCamelCase__ ):
change_first_primitive_element_in_list(lst[0], lowerCamelCase__ )
else:
_a = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict ):
_a = pa.array(TypedSequence(lowerCamelCase__, optimized_int_type=lowerCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype", [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
], )
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Tuple ):
# in range
_a = pa.array(OptimizedTypedSequence(lowerCamelCase__, col=lowerCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_a = copy.deepcopy(lowerCamelCase__ )
_a = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCamelCase__, lowerCamelCase__ )
_a = pa.array(OptimizedTypedSequence(lowerCamelCase__, col=lowerCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception", [False, True] )
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Any ):
_a = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=lowerCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _lowercase ( lowerCamelCase__ : Tuple ):
_a = "mock://dataset-train.arrow"
with ArrowWriter(path=lowerCamelCase__, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(lowerCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCamelCase__ )
def _lowercase ( ):
_a = pa.BufferOutputStream()
with ParquetWriter(stream=lowerCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(lowerCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True] )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any ):
import PIL.Image
_a = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCamelCase__, format="png" )
_a = pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCamelCase__, features=Features({"image": Image()} ), embed_local_files=lowerCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(lowerCamelCase__ )
_a = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"], lowerCamelCase__ )
with open(lowerCamelCase__, "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _lowercase ( ):
_a = pa.schema([pa.field("col_1", pa.string(), nullable=lowerCamelCase__ )] )
_a = pa.BufferOutputStream()
with ArrowWriter(stream=lowerCamelCase__ ) as writer:
writer._build_writer(inferred_schema=lowerCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1", pa.string() )] )
| 691 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger("transformers.models.encodec")
__snake_case : Tuple = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__snake_case : int = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__snake_case : Optional[int] = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__snake_case : Tuple = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__snake_case : int = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__snake_case : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Tuple = []
__snake_case : Optional[int] = []
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
elif weight_type == "weight_ih_l0":
_a = value
elif weight_type == "weight_hh_l0":
_a = value
elif weight_type == "bias_ih_l0":
_a = value
elif weight_type == "bias_hh_l0":
_a = value
elif weight_type == "weight_ih_l1":
_a = value
elif weight_type == "weight_hh_l1":
_a = value
elif weight_type == "bias_ih_l1":
_a = value
elif weight_type == "bias_hh_l1":
_a = value
else:
_a = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
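# Added note: both the ignore lists and the MAPPING_* tables support two
# wildcard forms -- a trailing ".*" (prefix match) and an embedded ".*."
# (any middle segment). For mapped keys such as
# "quantizer.layers.*.codebook.embed", `recursively_load_weights` below
# splices the layer index recovered from the original name in place of "*".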
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ):
_a = []
if model_name == "encodec_24khz" or "encodec_32khz":
_a = MAPPING_24K
elif model_name == "encodec_48khz":
_a = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__, lowerCamelCase__ ):
logger.info(F'''{name} was ignored''' )
continue
_a = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
_a = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "weight_ih_l0" in name:
_a = "weight_ih_l0"
elif "weight_hh_l0" in name:
_a = "weight_hh_l0"
elif "bias_ih_l0" in name:
_a = "bias_ih_l0"
elif "bias_hh_l0" in name:
_a = "bias_hh_l0"
elif "weight_ih_l1" in name:
_a = "weight_ih_l1"
elif "weight_hh_l1" in name:
_a = "weight_hh_l1"
elif "bias_ih_l1" in name:
_a = "bias_ih_l1"
elif "bias_hh_l1" in name:
_a = "bias_hh_l1"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
elif "running_mean" in name:
_a = "running_mean"
elif "running_var" in name:
_a = "running_var"
elif "num_batches_tracked" in name:
_a = "num_batches_tracked"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ):
if config_path is not None:
_a = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
_a = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_a = [8, 5, 4, 4]
_a = [2.2]
_a = 64
_a = 32_000
_a = 2_048
_a = False
_a = False
_a = False
elif model_name == "encodec_48khz":
_a = [8, 5, 4, 2]
_a = [3.0, 6.0, 12.0, 24.0]
_a = 48_000
_a = 2
_a = False
_a = "time_group_norm"
_a = True
_a = 1.0
_a = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_a = EncodecModel(lowerCamelCase__ )
_a = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_a = original_checkpoint["best_state"]
recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__snake_case : List[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 691 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Optional[int] = logging.get_logger(__name__)
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Any ):
_a = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[Any] ):
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_a = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
_a = in_proj_weight[
: encoder_config.hidden_size, :
]
_a = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_a = in_proj_weight[
-encoder_config.hidden_size :, :
]
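# Added note: the source checkpoint stores each encoder block's attention
# projections as one fused qkv matrix of shape (3 * hidden_size, hidden_size);
# the slices above peel it into the separate query / key / value weights the
# HF ViT encoder expects. Matching the comment above, only weights are split:
# the original conversion script builds the encoder with qkv_bias=False, so
# there are no attention biases to copy.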
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Optional[Any], lowerCamelCase__ : str ):
_a = dct.pop(lowerCamelCase__ )
_a = val
def _lowercase ( lowerCamelCase__ : List[Any] ):
if "handwritten" in checkpoint_url:
_a = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_a = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" )
return im
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict ):
_a = ViTConfig(image_size=384, qkv_bias=lowerCamelCase__ )
_a = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_a = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
_a = 1_024
_a = 4_096
_a = 24
_a = 16
_a = 1_024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_a = False
_a = "relu"
_a = 1_024
_a = True
_a = False
_a = False
# load HuggingFace model
_a = ViTModel(lowerCamelCase__, add_pooling_layer=lowerCamelCase__ )
_a = TrOCRForCausalLM(lowerCamelCase__ )
_a = VisionEncoderDecoderModel(encoder=lowerCamelCase__, decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
_a = torch.hub.load_state_dict_from_url(lowerCamelCase__, map_location="cpu", check_hash=lowerCamelCase__ )["model"]
_a = create_rename_keys(lowerCamelCase__, lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__, lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_a = state_dict.pop(lowerCamelCase__ )
if key.startswith("decoder" ) and "output_projection" not in key:
_a = val
else:
_a = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
_a = ViTImageProcessor(size=encoder_config.image_size )
_a = RobertaTokenizer.from_pretrained("roberta-large" )
_a = TrOCRProcessor(lowerCamelCase__, lowerCamelCase__ )
_a = processor(images=prepare_img(lowerCamelCase__ ), return_tensors="pt" ).pixel_values
# verify logits
_a = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_a = model(pixel_values=lowerCamelCase__, decoder_input_ids=lowerCamelCase__ )
_a = outputs.logits
_a = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
_a = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
_a = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
_a = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
_a = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10], lowerCamelCase__, atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase__ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__snake_case : str = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 691 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
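# Illustrative aside (not part of the module): with the lazy pattern above,
# importing this package stays cheap and the heavy torch import only happens
# on first attribute access, e.g.
#   from transformers.models import bloom   # fast, nothing materialized yet
#   bloom.BloomModel                         # triggers the real torch import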
| 691 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return int((input_a, input_a).count(0 ) == 0 )
def _lowercase ( ):
assert and_gate(0, 0 ) == 0
assert and_gate(0, 1 ) == 0
assert and_gate(1, 0 ) == 0
assert and_gate(1, 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 691 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=a ):
__UpperCAmelCase : int = ["""torch""", """scipy"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
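# Illustrative aside (hypothetical session, not part of the original file):
# any use of this placeholder without the backends installed fails fast, e.g.
#   A()  # raises ImportError naming the missing "torch" and "scipy" backends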
| 691 | 1 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__snake_case : Any = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__snake_case : Dict = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__snake_case : Any = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__snake_case : Optional[Any] = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__snake_case : Any = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str] ):
for tf_name, hf_name in patterns:
_a = k.replace(lowerCamelCase__, lowerCamelCase__ )
return k
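# Minimal usage sketch (illustrative key, not from the original script):
# applying the substitution pairs in order turns a slash-separated TensorFlow
# key into the dotted Hugging Face naming expected by the state dict.
def _rename_key_demo() -> str:
    demo_key = "pegasus/encoder/layer_0/attention/self/query/kernel"
    renamed = rename_state_dict_key(demo_key, REMAINING_PATTERNS)
    assert renamed == "model.encoder.layers.0.self_attn.self.query.weight"
    return renamed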
def _lowercase ( lowerCamelCase__ : dict, lowerCamelCase__ : dict ):
_a = BigBirdPegasusConfig(**lowerCamelCase__ )
_a = BigBirdPegasusForConditionalGeneration(lowerCamelCase__ )
_a = torch_model.state_dict()
_a = {}
# separating decoder weights
_a = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
_a = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
_a = [k.endswith(lowerCamelCase__ ) for ending in KEYS_TO_IGNORE]
if any(lowerCamelCase__ ):
continue
_a = DECODER_PATTERNS
_a = rename_state_dict_key(lowerCamelCase__, lowerCamelCase__ )
if new_k not in state_dict:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
_a = v.T
_a = torch.from_numpy(lowerCamelCase__ )
assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
_a = [k.endswith(lowerCamelCase__ ) for ending in KEYS_TO_IGNORE]
if any(lowerCamelCase__ ):
continue
_a = REMAINING_PATTERNS
_a = rename_state_dict_key(lowerCamelCase__, lowerCamelCase__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
_a = v.T
_a = torch.from_numpy(lowerCamelCase__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
_a = mapping["model.embed_positions.weight"]
_a = mapping.pop("model.embed_positions.weight" )
_a , _a = torch_model.load_state_dict(lowerCamelCase__, strict=lowerCamelCase__ )
_a = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def _lowercase ( lowerCamelCase__ : Any ):
_a = tf.train.list_variables(lowerCamelCase__ )
_a = {}
_a = ["global_step"]
for name, shape in tqdm(lowerCamelCase__, desc="converting tf checkpoint to dict" ):
_a = any(pat in name for pat in ignore_name )
if skip_key:
continue
_a = tf.train.load_variable(lowerCamelCase__, lowerCamelCase__ )
_a = array
return tf_weights
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : str, lowerCamelCase__ : dict ):
_a = get_tf_weights_as_numpy(lowerCamelCase__ )
_a = convert_bigbird_pegasus(lowerCamelCase__, lowerCamelCase__ )
torch_model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__snake_case : Tuple = parser.parse_args()
__snake_case : Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 691 |
'''simple docstring'''
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 1 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class A ( pl.LightningModule ):
def __init__( self , snake_case_ ) -> List[str]:
super().__init__()
_a = model
_a = 2
_a = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __lowerCAmelCase ( self ) -> int:
pass
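# Illustrative sketch (toy modules, not part of the original script) of the
# state-dict transfer pattern used below: two modules with identical
# parameter shapes can copy weights through their state dicts.
def _state_dict_transfer_demo() -> None:
    src = nn.Linear(4, 2)
    dst = nn.Linear(4, 2)
    dst.load_state_dict(src.state_dict())
    assert torch.equal(src.weight, dst.weight)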
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : str, lowerCamelCase__ : str ):
# load longformer model from model identifier
_a = LongformerModel.from_pretrained(lowerCamelCase__ )
_a = LightningModel(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__, map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
_a = LongformerForQuestionAnswering.from_pretrained(lowerCamelCase__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowerCamelCase__ )
print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__snake_case : Tuple = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 691 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 691 | 1 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
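# Illustrative sketch (made-up logits, not part of the original code): the
# `entropy` helper imported above scores how confident an intermediate
# "highway" classifier is; at inference time a low-entropy exit lets the
# model stop before running every remaining layer.
def _entropy_sketch():
    import torch
    logits = torch.tensor([[4.0, 0.1, 0.1]])  # a confident prediction
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum()  # small value -> early-exit candidate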
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , a , )
class A ( a ):
__UpperCAmelCase : Tuple = RobertaConfig
__UpperCAmelCase : Any = """roberta"""
def __init__( self , snake_case_ ) -> str:
super().__init__(snake_case_ )
_a = RobertaEmbeddings(snake_case_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , a , )
class A ( a ):
__UpperCAmelCase : Any = RobertaConfig
__UpperCAmelCase : int = """roberta"""
def __init__( self , snake_case_ ) -> Tuple:
super().__init__(snake_case_ )
_a = config.num_labels
_a = config.num_hidden_layers
_a = DeeRobertaModel(snake_case_ )
_a = nn.Dropout(config.hidden_dropout_prob )
_a = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(snake_case_ )
def __lowerCAmelCase ( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=-1 , snake_case_=False , ) -> Dict:
_a = self.num_layers
try:
_a = self.roberta(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , position_ids=snake_case_ , head_mask=snake_case_ , inputs_embeds=snake_case_ , )
_a = outputs[1]
_a = self.dropout(snake_case_ )
_a = self.classifier(snake_case_ )
_a = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_a = e.message
_a = e.exit_layer
_a = outputs[0]
if not self.training:
_a = entropy(snake_case_ )
_a = []
_a = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_a = MSELoss()
_a = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_a = CrossEntropyLoss()
_a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_a = []
for highway_exit in outputs[-1]:
_a = highway_exit[0]
if not self.training:
highway_logits_all.append(snake_case_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_a = MSELoss()
_a = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_a = CrossEntropyLoss()
_a = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(snake_case_ )
if train_highway:
_a = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_a = (loss,) + outputs
if not self.training:
_a = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_a = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 691 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
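# Worked illustration (made-up numbers, not part of the training script) of
# the metric computation above: logits of shape (batch, num_labels) are
# argmax-ed into class ids and compared against the gold labels.
def _accuracy_demo() -> float:
    demo_logits = np.array([[0.1, 0.9], [0.8, 0.2]])
    demo_preds = np.argmax(demo_logits, axis=1)  # -> array([1, 0])
    return float((demo_preds == np.array([1, 0])).mean())  # 1.0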
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
_a = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 691 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , ) -> Optional[int]:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_attention_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_choices
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_attention_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Dict:
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = True
_a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Tuple = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCAmelCase ( self ) -> Dict:
_a = FlaxRobertaModelTester(self )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
_a = model_class_name.from_pretrained("roberta-base" , from_pt=snake_case_ )
_a = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case_ )
| 691 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the one reported in the paper, as the researchers don't use `sacrebleu` and instead measure the score on tokenized outputs. The `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 1 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
__snake_case : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
__snake_case : List[Any] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_0000):
out_file.write(data)
__snake_case : Dict = BeautifulSoup(res.text, "html.parser")
__snake_case : Any = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'''https://google.com{link.get('href')}''')
| 691 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 691 | 1 |
import re
import string
import numpy as np
import datasets
__snake_case : Tuple = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
__snake_case : Tuple = "\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n"
__snake_case : Optional[int] = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=False , ) -> Optional[Any]:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_a = np.array([re.sub(A__ , "" , A__ ) for x in predictions] )
_a = np.array([re.sub(A__ , "" , A__ ) for x in references] )
else:
_a = np.asarray(A__ )
_a = np.asarray(A__ )
if ignore_case:
_a = np.char.lower(A__ )
_a = np.char.lower(A__ )
if ignore_punctuation:
_a = string.punctuation.maketrans("" , "" , string.punctuation )
_a = np.char.translate(A__ , table=A__ )
_a = np.char.translate(A__ , table=A__ )
if ignore_numbers:
_a = string.digits.maketrans("" , "" , string.digits )
_a = np.char.translate(A__ , table=A__ )
_a = np.char.translate(A__ , table=A__ )
_a = predictions == references
return {"exact_match": np.mean(A__ ) * 1_0_0}
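# Worked illustration (made-up strings, not part of the metric): the
# normalization pipeline above makes "The Cat!" and "the cat" an exact match
# once casing and punctuation are ignored.
def _normalization_demo() -> float:
    preds = np.char.lower(np.asarray(["The Cat!"]))
    refs = np.char.lower(np.asarray(["the cat"]))
    table = string.punctuation.maketrans("", "", string.punctuation)
    preds = np.char.translate(preds, table=table)
    refs = np.char.translate(refs, table=table)
    return float((preds == refs).mean() * 100)  # 100.0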
| 700 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
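# Illustrative sketch (made-up token ids, not part of the test suite; only
# meaningful when torch is available): shift_tokens_right builds decoder
# inputs by moving the labels one position to the right and prepending the
# decoder-start token.
def _shift_tokens_right_sketch():
    import torch
    labels = torch.tensor([[5, 6, 2]])  # 2 = eos
    return shift_tokens_right(labels, 1, 2)  # -> tensor([[2, 5, 6]])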
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
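    # Note (inferred from the expected dict above): M2M100 marks the *source*
    # language with a prefix token on the encoder side (en_XX -> 128022) and
    # steers the decoder toward the *target* language via `forced_bos_token_id`
    # (ar_AR -> 128006) rather than via a token in the input sequence.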
| 691 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__snake_case : Any = logging.get_logger(__name__)
__snake_case : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__snake_case : Tuple = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__snake_case : Any = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__snake_case : List[str] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A ( PreTrainedTokenizerFast ):
__UpperCAmelCase : Dict = VOCAB_FILES_NAMES
__UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any = SqueezeBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
_a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
_a = getattr(lowerCAmelCase__ , normalizer_state.pop("type" ) )
_a = do_lower_case
_a = strip_accents
_a = tokenize_chinese_chars
_a = normalizer_class(**lowerCAmelCase__ )
_a = do_lower_case
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> List[Any]:
_a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
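    # Illustrative usage (hedged sketch; assumes the upstream export name
    # `SqueezeBertTokenizerFast` for the class defined above):
    #   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    #   tok("Hello world")["input_ids"]   # [CLS] ... [SEP] ids, token_type_ids all 0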
| 701 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( PretrainedConfig ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
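    # Illustrative note: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2)
    # this property evaluates to 5 * 2**6 = 320, i.e. the feature encoder emits
    # one frame per 320 input samples (20 ms of audio at 16 kHz).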
| 691 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : List[str] = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class A ( PretrainedConfig ):
__UpperCAmelCase : Dict = """ctrl"""
__UpperCAmelCase : Optional[Any] = ["""past_key_values"""]
__UpperCAmelCase : Optional[Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , snake_case_=2_4_6_5_3_4 , snake_case_=2_5_6 , snake_case_=1_2_8_0 , snake_case_=8_1_9_2 , snake_case_=4_8 , snake_case_=1_6 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=0.02 , snake_case_=True , **snake_case_ , ) -> Dict:
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = dff
_a = resid_pdrop
_a = embd_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = use_cache
        super().__init__(**snake_case_ )
| 702 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number | (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return int((number & (1 << position)) != 0 )
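# Worked example (illustrative), with number = 0b1001 (9):
#   set position 2:   9 | (1 << 2)   -> 0b1101 (13)
#   clear position 2: 13 & ~(1 << 2) -> 0b1001 (9)
#   flip position 3:  9 ^ (1 << 3)   -> 0b0001 (1)
#   get position 3:   (9 >> 3) & 1   -> 1 (bit is set)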
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691 | 0 |
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__snake_case : Any = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _lowercase ( lowerCamelCase__ : Dict = "dhaka", lowerCamelCase__ : Any = 5 ):
_a = min(lowerCamelCase__, 50 ) # Prevent abuse!
_a = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
_a = requests.get("https://www.google.com/search", params=lowerCamelCase__, headers=lowerCamelCase__ )
_a = BeautifulSoup(html.text, "html.parser" )
_a = "".join(
re.findall(R"AF_initDataCallback\(([^<]+)\);", str(soup.select("script" ) ) ) )
_a = json.dumps(lowerCamelCase__ )
_a = json.loads(lowerCamelCase__ )
_a = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",", lowerCamelCase__, )
if not matched_google_image_data:
return 0
_a = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]", "", str(lowerCamelCase__ ), )
_a = re.findall(
R"(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]", lowerCamelCase__, )
for index, fixed_full_res_image in enumerate(lowerCamelCase__ ):
if index >= max_images:
return index
_a = bytes(lowerCamelCase__, "ascii" ).decode(
"unicode-escape" )
_a = bytes(lowerCamelCase__, "ascii" ).decode(
"unicode-escape" )
_a = urllib.request.build_opener()
_a = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(lowerCamelCase__ )
_a = F'''query_{query.replace(' ', '_' )}'''
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
urllib.request.urlretrieve( # noqa: S310
lowerCamelCase__, F'''{path_name}/original_size_img_{index}.jpg''' )
return index
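# Caveat (observational): the scraper depends on the AF_initDataCallback
# payload embedded in Google's results page, so the two regular expressions
# above are brittle and may stop matching whenever that markup changes.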
if __name__ == "__main__":
try:
__snake_case : str = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print("Please provide a search term.")
raise
| 703 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__snake_case : List[Any] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ):
_a = True
while ask_again:
_a = input(lowerCamelCase__ )
try:
if default is not None and len(lowerCamelCase__ ) == 0:
return default
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ):
_a = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
_a = menu.run(default_choice=lowerCamelCase__ )
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _lowercase ( lowerCamelCase__ : Dict ):
_a = int(lowerCamelCase__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = int(lowerCamelCase__ )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _lowercase ( lowerCamelCase__ : str ):
return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_a = usage.replace("<command> [<args>] " , "" )
return usage
| 691 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_a = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_a = 4
_a = 48
_a = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_a = [6, 6, 6, 6]
_a = 60
_a = [6, 6, 6, 6]
_a = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_a = 4
_a = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_a = 1
_a = 1
_a = 126
_a = 7
_a = 255.0
_a = ''
return config
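# The branches above only override the defaults that differ per checkpoint
# (upscale factor, embed_dim, depths, window size, upsampler flavour);
# every other field keeps the SwinaSRConfig default.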
def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : Any ):
if "patch_embed.proj" in name and "layers" not in name:
_a = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_a = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm" )
if "layers" in name:
_a = name.replace("layers", "encoder.stages" )
if "residual_group.blocks" in name:
_a = name.replace("residual_group.blocks", "layers" )
if "attn.proj" in name:
_a = name.replace("attn.proj", "attention.output.dense" )
if "attn" in name:
_a = name.replace("attn", "attention.self" )
if "norm1" in name:
_a = name.replace("norm1", "layernorm_before" )
if "norm2" in name:
_a = name.replace("norm2", "layernorm_after" )
if "mlp.fc1" in name:
_a = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
_a = name.replace("mlp.fc2", "output.dense" )
if "q_bias" in name:
_a = name.replace("q_bias", "query.bias" )
if "k_bias" in name:
_a = name.replace("k_bias", "key.bias" )
if "v_bias" in name:
_a = name.replace("v_bias", "value.bias" )
if "cpb_mlp" in name:
_a = name.replace("cpb_mlp", "continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
_a = name.replace("patch_embed.proj", "patch_embed.projection" )
if name == "norm.weight":
_a = 'layernorm.weight'
if name == "norm.bias":
_a = 'layernorm.bias'
if "conv_first" in name:
_a = name.replace("conv_first", "first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_a = name.replace("conv_last", "final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_a = name.replace("conv_before_upsample.0", "conv_before_upsample" )
if "upsample.0" in name:
_a = name.replace("upsample.0", "upsample.convolution_0" )
if "upsample.2" in name:
_a = name.replace("upsample.2", "upsample.convolution_1" )
_a = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
_a = name.replace("upsample.0.weight", "upsample.conv.weight" )
_a = name.replace("upsample.0.bias", "upsample.conv.bias" )
else:
pass
else:
_a = 'swin2sr.' + name
return name
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Union[str, Any] ):
for key in orig_state_dict.copy().keys():
_a = orig_state_dict.pop(_lowercase )
if "qkv" in key:
_a = key.split("." )
_a = int(key_split[1] )
_a = int(key_split[4] )
_a = config.embed_dim
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
pass
else:
_a = val
return orig_state_dict
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Any, lowerCamelCase__ : Union[str, Any] ):
_a = get_config(_lowercase )
_a = SwinaSRForImageSuperResolution(_lowercase )
model.eval()
_a = torch.hub.load_state_dict_from_url(_lowercase, map_location="cpu" )
_a = convert_state_dict(_lowercase, _lowercase )
_a = model.load_state_dict(_lowercase, strict=_lowercase )
if len(_lowercase ) > 0:
raise ValueError("Missing keys when converting: {}".format(_lowercase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'''Unexpected key {key} in state_dict''' )
# verify values
_a = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
_a = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert("RGB" )
_a = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_a = 126 if 'Jpeg' in checkpoint_url else 256
_a = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06], std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_a = transforms(_lowercase ).unsqueeze(0 )
if config.num_channels == 1:
_a = pixel_values[:, 0, :, :].unsqueeze(1 )
_a = model(_lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_a = torch.Size([1, 3, 512, 512] )
_a = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_a = torch.Size([1, 3, 1_024, 1_024] )
_a = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_a = torch.Size([1, 3, 1_024, 1_024] )
_a = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_a = torch.Size([1, 3, 512, 512] )
_a = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_a = torch.Size([1, 3, 1_024, 1_024] )
_a = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1e-3 )
print("Looks ok!" )
_a = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
_a = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(F'''caidas/{model_name}''' )
processor.push_to_hub(F'''caidas/{model_name}''' )
if __name__ == "__main__":
__snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
__snake_case : int = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 704 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[list] ):
_a = current_set.copy()
for row_index, row in enumerate(lowerCamelCase__ ):
_a = row[0]
for column_index, column in enumerate(lowerCamelCase__ ):
if magnitude == 0:
_a = column
continue
_a = column / magnitude
# Subtract to cancel term
_a = current_set[0]
_a = [first_row]
_a = current_set[1::]
for row in current_set:
_a = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCamelCase__ )
continue
for column_index in range(len(lowerCamelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a = final_set[0]
_a = []
_a = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a = simplify(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, lowerCamelCase__ )
_a = resultant
return final_set
def _lowercase ( lowerCamelCase__ : list[list] ):
if len(lowerCamelCase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
_a = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
_a = equations.copy()
if any(0 in row for row in data_set ):
_a = data_set.copy()
_a = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
_a = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, lowerCamelCase__ )
_a = data_set.copy()
_a = simplify(lowerCamelCase__ )
_a = simplified[::-1]
_a = []
for row in simplified:
_a = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
_a = temp_row[1::]
_a = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
_a = []
for item in solutions:
final.append(float(round(lowerCamelCase__, 5 ) ) )
return final[::-1]
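# Illustrative 2x2 case: [[2, 1, 7], [1, 3, 11]] encodes 2x + y = 7 and
# x + 3y = 11; elimination followed by the back-substitution above yields
# [2.0, 3.0], i.e. x = 2 and y = 3 in the order restored by final[::-1].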
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 691 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : int, lowerCamelCase__ : Optional[int]=None ):
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
_a = nn.Parameter(_A )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
_a = nn.Parameter(_A )
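# The helpers below transpose and flatten each trax attention block so that it
# matches the 2-D (out_features, in_features) layout that the corresponding
# PyTorch nn.Linear parameters expect.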
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : str ):
_a = np.asarray(weights[0] )
_a = np.asarray(weights[1] )
_a = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key, torch.tensor(_A ).transpose(1, 2 ).contiguous().view(-1, _A ), )
set_param(
torch_layer.self_attention.value, torch.tensor(_A ).transpose(1, 2 ).contiguous().view(-1, _A ), )
set_param(
torch_layer.output.dense, torch.tensor(_A ).view(-1, _A ).contiguous().transpose(0, 1 ), )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Tuple, lowerCamelCase__ : Optional[int] ):
_a = np.asarray(weights[0] )
_a = np.asarray(weights[1] )
_a = np.asarray(weights[2] )
_a = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query, torch.tensor(_A ).transpose(1, 2 ).contiguous().view(-1, _A ), )
set_param(
torch_layer.self_attention.key, torch.tensor(_A ).transpose(1, 2 ).contiguous().view(-1, _A ), )
set_param(
torch_layer.self_attention.value, torch.tensor(_A ).transpose(1, 2 ).contiguous().view(-1, _A ), )
set_param(
torch_layer.output.dense, torch.tensor(_A ).view(-1, _A ).contiguous().transpose(0, 1 ), )
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : str, lowerCamelCase__ : Dict ):
_a = weights[0][0][0]
_a = np.asarray(layer_norm_a[0] )
_a = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm, torch.tensor(_A ), torch.tensor(_A ), )
# lsh weights + output
_a = weights[0][1]
if len(_A ) < 4:
set_layer_weights_in_torch_lsh(_A, torch_block.attention, _A )
else:
set_layer_weights_in_torch_local(_A, torch_block.attention, _A )
# intermediate weighs
_a = weights[2][0][1][2]
# Chunked Feed Forward
if len(_A ) == 4:
_a = intermediate_weights[2]
# layernorm 2
_a = np.asarray(intermediate_weights[0][0] )
_a = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm, torch.tensor(_A ), torch.tensor(_A ), )
# intermediate dense
_a = np.asarray(intermediate_weights[1][0] )
_a = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense, torch.tensor(_A ).transpose(0, 1 ).contiguous(), torch.tensor(_A ), )
# intermediate out
_a = np.asarray(intermediate_weights[4][0] )
_a = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense, torch.tensor(_A ).transpose(0, 1 ).contiguous(), torch.tensor(_A ), )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = torch_model.reformer
# word embeds
_a = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings, torch.tensor(_A ), )
if isinstance(weights[3], _A ):
_a = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
_a = nn.Parameter(torch.tensor(_A ) )
_a = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
_A ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(_A, _A, _A )
# output layer norm
_a = np.asarray(weights[7][0] )
_a = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm, torch.tensor(_A ), torch.tensor(_A ), )
# output embeddings
_a = np.asarray(weights[9][0] )
_a = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder, torch.tensor(_A ).transpose(0, 1 ).contiguous(), torch.tensor(_A ), )
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Optional[int] ):
_a = ReformerConfig.from_json_file(_A )
print(F'''Building PyTorch model from configuration: {config}''' )
_a = ReformerModelWithLMHead(_A )
with open(_A, "rb" ) as f:
_a = pickle.load(_A )["weights"]
set_model_weights_in_torch(_A, _A, config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), _A )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__snake_case : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 705 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing
return x.sum()
def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing
return i + 1
@dataclass
class A :
__UpperCAmelCase : int
__UpperCAmelCase : str
class A ( TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
        class Foo :
__UpperCAmelCase : Optional[int] = """bar"""
_a = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ):
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
_a = {F'''{i}''': i for i in range(lowerCamelCase__ )}
_a = map_nested(lambda lowerCamelCase__ : x + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
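# Behaviour pinned down by the parametrization above: map_nested only spawns a
# multiprocessing pool when num_proc > 1 *and* the iterable holds at least
# parallel_min_length (16) items, and the pool size is capped at the iterable
# length (e.g. 16 items with num_proc=17 still uses 16 processes).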
class A ( TestCase ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( lowerCamelCase__ : Any ):
_a = NestedDataStructure(lowerCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ):
_a = NestedDataStructure(lowerCamelCase__ ).flatten()
assert output == expected_output
def _lowercase ( ):
_a = A(x=1, y="foobar" )
_a = {"x": 1, "y": "foobar"}
assert asdict(lowerCamelCase__ ) == expected_output
_a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
_a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(lowerCamelCase__ ) == expected_output
with pytest.raises(lowerCamelCase__ ):
asdict([1, A(x=10, y="foo" )] )
def _lowercase ( lowerCamelCase__ : str ):
return text.split()
def _lowercase ( lowerCamelCase__ : List[Any] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowercase ( ):
with Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_a = []
for yield_time, content in iflatmap_unordered(
lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(lowerCamelCase__ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(lowerCamelCase__ ) == 4
| 691 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__snake_case : Optional[Any] = logging.getLogger(__name__)
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_0522, type=int)
__snake_case : List[Any] = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, "rb") as fp:
__snake_case : str = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
__snake_case : Tuple = Counter()
for tk_ids in data:
counter.update(tk_ids)
__snake_case : Tuple = [0] * args.vocab_size
for k, v in counter.items():
__snake_case : Optional[int] = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 706 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__snake_case )
class A ( PretrainedConfig ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
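    # Illustrative usage (hedged sketch; assumes the upstream export names
    # `RagConfig`, `DPRConfig` and `BartConfig`):
    #   rag_config = RagConfig.from_question_encoder_generator_configs(
    #       DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
    #   )
    # i.e. the classmethod above composes two sub-configs into one RAG config.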
| 691 | 0 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def _lowercase ( lowerCamelCase__ : float ):
if num <= 0:
raise ValueError("math domain error" )
return quad(lowerCamelCase__, 0, lowerCamelCase__, args=(lowerCamelCase__) )[0]
def _lowercase ( lowerCamelCase__ : float, lowerCamelCase__ : float ):
return math.pow(lowerCamelCase__, z - 1 ) * math.exp(-x )
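# Sanity values (illustrative): the integral of x**(z - 1) * exp(-x) over
# [0, inf) is the gamma function, so z = 5 should give 4! = 24 and z = 0.5
# should give sqrt(pi) ≈ 1.77245.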
if __name__ == "__main__":
from doctest import testmod
testmod()
| 707 |
'''simple docstring'''
class A :
def __init__( self ) -> List[str]:
_a = 0
_a = 0
_a = {}
def __lowerCAmelCase ( self , snake_case_ ) -> int:
if vertex not in self.adjacency:
_a = {}
self.num_vertices += 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
_a = weight
_a = weight
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
_a = list(edges[i] )
edges.sort(key=lambda snake_case_ : e[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a = edges[i][2] + 1
for edge in edges:
_a , _a , _a = edge
_a = weight
_a = weight
def __str__( self ) -> Optional[int]:
_a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCAmelCase ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any:
_a = Graph()
if vertices is None:
_a = []
if edges is None:
_a = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class A :
def __init__( self ) -> Optional[int]:
_a = {}
_a = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
if item in self.parent:
return self.find(snake_case_ )
_a = item
_a = 0
return item
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
_a = self.find(self.parent[item] )
return self.parent[item]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = self.find(snake_case_ )
_a = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a = roota
return roota
return None
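        # Union by rank: the shallower tree is attached under the deeper one,
        # which together with the path compression in find() keeps the
        # amortized cost of each operation close to constant.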
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = graph.num_vertices
_a = Graph.UnionFind()
_a = []
while num_components > 1:
_a = {}
for vertex in graph.get_vertices():
_a = -1
_a = graph.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a = edge
_a = union_find.find(snake_case_ )
_a = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
_a = num_components - 1
_a = Graph.build(edges=snake_case_ )
return mst
| 691 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 708 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = " Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_a = bytes(lowerCamelCase__, "utf-8" )
with zstd.open(lowerCamelCase__, "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture
def _lowercase ( lowerCamelCase__ : int ):
with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f:
f.write(lowerCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 | 0 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root is guaranteed between a and b only if the
    # function changes sign on the interval.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
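    # Sanity check (sketch, assuming the functions above): both calls bracket
    # the positive root of 10 - x*x = 0, so each printed value lies within the
    # loop's 0.01 tolerance of sqrt(10) ~= 3.16228.
    assert abs(bisection(0, 6) - 10**0.5) < 0.01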
| 709 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
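# Illustration (hypothetical key; real SAM checkpoint keys may differ): the
# substring mapping above rewrites an original key as follows.
#     key = "image_encoder.blocks.0.norm1.weight"
#     for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
#         if key_to_modify in key:
#             key = key.replace(key_to_modify, new_key)
#     # -> "vision_encoder.layers.0.layer_norm1.weight"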
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(vision_config=vision_config, )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 691 | 0 |
'''simple docstring'''
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
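    # Quick check of the recurrence above: catalan(n) yields the (n-1)-th
    # Catalan number, so the first six values are 1, 1, 2, 5, 14, 42.
    assert [catalan(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]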
| 710 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__( self, num_train_timesteps=1000, beta_start=0.00085, beta_end=0.012, beta_schedule="linear", trained_betas=None, prediction_type="epsilon", timestep_spacing="linspace", steps_offset=0, ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep, ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None, ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True, ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps, ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self) -> int:
        return self.config.num_train_timesteps
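# Minimal driver sketch (not part of the original file): KDPM2 performs two model
# evaluations per output step, which is why `set_timesteps` builds an interleaved
# timestep schedule that is iterated directly. `denoiser` is a stand-in model.
if __name__ == "__main__":

    def denoiser(x, t):
        # stand-in for a real sigma-conditioned noise-prediction network
        return torch.zeros_like(x)

    scheduler = KDPM2DiscreteScheduler()
    scheduler.set_timesteps(25, device="cpu")

    sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = denoiser(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample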
| 691 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 711 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is usable on this vertex only if no already-colored neighbour uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
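# Usage sketch (not part of the original module): a 4-cycle, given as an
# adjacency matrix, is 2-colorable.
if __name__ == "__main__":
    cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle, 2))  # -> [0, 1, 0, 1]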
| 691 | 0 |
'''simple docstring'''
import os
__snake_case : List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
numerals += m_count * "M"
num %= 1_000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
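# Quick sanity check of the two converters above (sketch): "XXXXVIIII" parses
# to 49, whose minimal Roman form is "XLIX".
assert parse_roman_numerals("XXXXVIIII") == 49
assert generate_roman_numerals(49) == "XLIX"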
def _lowercase ( lowerCamelCase__ : int = "/p089_roman.txt" ):
_a = 0
with open(os.path.dirname(__lowerCAmelCase ) + roman_numerals_filename ) as filea:
_a = filea.readlines()
for line in lines:
_a = line.strip()
_a = parse_roman_numerals(__lowerCAmelCase )
_a = generate_roman_numerals(__lowerCAmelCase )
savings += len(__lowerCAmelCase ) - len(__lowerCAmelCase )
return savings
if __name__ == "__main__":
print(f'''{solution() = }''')
| 712 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
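    # Usage sketch (not part of the original module): a weighted triangle with
    # edges 1-2 (w=1), 2-3 (w=2), 1-3 (w=3); its MST connects 2-1 and 3-2.
    vertices = [Vertex(i) for i in range(3)]
    connect(vertices, 1, 2, 1)
    connect(vertices, 2, 3, 2)
    connect(vertices, 1, 3, 3)
    print(prim(vertices, vertices[0]))  # -> [(2, 1), (3, 2)]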
| 691 | 0 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int, power: int) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 713 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class A :
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
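    # Determinism check (sketch): the same seed yields the same stream. The
    # constants (1664525, 1013904223, 2**32) are the classic Numerical Recipes
    # LCG parameters.
    lcg_a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, 42)
    lcg_b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, 42)
    assert [lcg_a.next_number() for _ in range(5)] == [lcg_b.next_number() for _ in range(5)]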
while True:
print(lcg.next_number())
| 691 | 0 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
| 714 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
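# Worked example (hypothetical key, for illustration only):
#     name = "encoder.model.0.conv.conv.weight_g"
#     -> matches MAPPING entry "encoder.model.0.conv.conv" -> "encoder.layers.0.conv"
#     -> weight_type resolves to "weight_g"
#     -> set_recursively(hf_model, "encoder.layers.0.conv", value, name, "weight_g")
#        i.e. hf_model.encoder.layers[0].conv.weight_g.data = value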
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 691 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __lowerCAmelCase ( self ) -> str:
_a = self.get_tokenizer()
_a = self.get_image_processor()
_a = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
_a = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = self.get_image_processor()
_a = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_a = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 )
_a = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def __lowerCAmelCase ( self ) -> int:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_a = self.prepare_image_inputs()
_a = image_processor(_UpperCamelCase , return_tensors="np" )
_a = processor(images=_UpperCamelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_a = """test"""
_a = processor(text=_UpperCamelCase )
_a = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_a = """test"""
_a = self.prepare_image_inputs()
_a = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase ):
processor()
def __lowerCAmelCase ( self ) -> str:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.char_decode(_UpperCamelCase )
_a = tokenizer.batch_decode(_UpperCamelCase )
_a = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ( self ) -> str:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_a = None
_a = self.prepare_image_inputs()
_a = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_a = torch.randn(1 , 2_7 , 3_8 )
_a = torch.randn(1 , 2_7 , 5_0_2_5_7 )
_a = torch.randn(1 , 2_7 , 3_0_5_2_2 )
_a = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case : List[str] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        }, )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
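# Usage sketch (assumes a writable "out" dir): `to_dict` keeps the arguments
# JSON-serializable even when a GenerationConfig object is stored on the field.
#     args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True)
#     args.generation_config = GenerationConfig(num_beams=4)
#     args.to_dict()  # the GenerationConfig value is converted to a plain dict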
| 716 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 691 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
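# Usage sketch (hypothetical column names): `column_mapping` is what drives the
# renaming when a dataset is prepared for this task.
#     task = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
#     task.column_mapping  # -> {"q": "question", "ctx": "context", "ans": "answers"}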
| 717 |
'''simple docstring'''
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 0 |
'''simple docstring'''
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
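# Usage sketch (downloads the CLIPSeg checkpoint on first use; requires the
# torch and vision backends):
#     tool = ImageSegmentationTool()
#     mask = tool(image, "cat")   # PIL image in, binary PIL mask out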
| 718 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
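# A self-contained sketch of the greedy longest-match-first rule the
# WordpieceTokenizer test above exercises ("unwanted" -> un, ##want, ##ed).
# The helper below is an illustrative re-implementation, not the library code.
def wordpiece_sketch(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:  # shrink the candidate until it is in the vocab
            piece = ("##" if start > 0 else "") + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:  # no prefix of the remainder matched: the whole word becomes UNK
            return [unk]
        start = end
    return pieces
assert wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert wordpiece_sketch("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]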
| 691 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class A ( _UpperCAmelCase ):
__UpperCAmelCase : List[Any] = (DDPMScheduler,)
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
_a = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**__UpperCamelCase )
return config
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> List[Any]:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> List[str]:
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , )
def __lowerCAmelCase ( self ) -> List[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> int:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> int:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self ) -> Any:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__UpperCamelCase )
_a = len(__UpperCamelCase )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
_a = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a = pred_prev_sample
_a = torch.sum(torch.abs(__UpperCamelCase ) )
_a = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def __lowerCAmelCase ( self ) -> Dict:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type="v_prediction" )
_a = scheduler_class(**__UpperCamelCase )
_a = len(__UpperCamelCase )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
_a = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a = pred_prev_sample
_a = torch.sum(torch.abs(__UpperCamelCase ) )
_a = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__UpperCamelCase )
_a = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCamelCase )
_a = scheduler.timesteps
for i, timestep in enumerate(__UpperCamelCase ):
if i == len(__UpperCamelCase ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(__UpperCamelCase )
_a = prev_t.item()
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__UpperCamelCase )
_a = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(__UpperCamelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__UpperCamelCase )
_a = [1_0_0, 8_7, 5_0, 1, 0]
_a = len(__UpperCamelCase )
with self.assertRaises(__UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__UpperCamelCase )
_a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __UpperCamelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
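# A minimal sketch of the denoising loop the tests above step through, using
# only the public diffusers API already imported in this file; the all-zeros
# "model output" is a stand-in for a trained UNet and purely illustrative.
sketch_scheduler = DDPMScheduler(num_train_timesteps=1_0_0_0, beta_schedule="linear")
sketch_scheduler.set_timesteps(5_0)
sketch_sample = torch.randn(1, 3, 8, 8)
for sketch_t in sketch_scheduler.timesteps:
    sketch_noise_pred = torch.zeros_like(sketch_sample)  # stand-in for model(sample, t)
    sketch_sample = sketch_scheduler.step(sketch_noise_pred, sketch_t, sketch_sample).prev_sample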
| 719 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
        _a = labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
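# `ClassLabel.str2int` (used in `tokenize` above) maps a label string to its
# integer id. A quick self-contained illustration with toy class names:
from datasets import ClassLabel as _SketchClassLabel
_sketch_labels = _SketchClassLabel(names=["constant", "linear", "quadratic"])
assert _sketch_labels.str2int("linear") == 1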
| 691 | 0 |
'''simple docstring'''
__snake_case : Dict = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__snake_case : Union[str, Any] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__snake_case : Optional[int] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : List[str], lowerCamelCase__ : Tuple ):
assert len(str(lowerCamelCase__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_a = year // 100
_a = (5 * (century % 4) + 2) % 7
_a = year % 100
_a = centurian % 12
_a = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_a = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
    _a = (day_anchor + day - dooms_day) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
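# Worked example of the rule above, kept independent of the obfuscated
# bindings: 2000 is a leap year, so January's doomsday is the 4th; the anchor
# for the 2000s is Tuesday (2) and year 00 adds nothing, so 2000-01-01 falls
# on (2 + 1 - 4) % 7 = 6, i.e. "Saturday".
_sketch_anchor = (5 * ((2000 // 100) % 4) + 2) % 7  # 2, Tuesday
assert (_sketch_anchor + 1 - 4) % 7 == 6  # 6 -> "Saturday"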
| 720 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[Any]:
_a = tempfile.mkdtemp()
# fmt: off
_a = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_a = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
_a = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_a = {"""unk_token""": """<unk>"""}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowercase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowercase ) )
_a = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_a = os.path.join(self.tmpdirname , _lowercase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(_lowercase , _lowercase )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def __lowerCAmelCase ( self , **snake_case_ ) -> Any:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowercase )
def __lowerCAmelCase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
_a = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self ) -> int:
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer()
_a = self.get_image_processor()
_a = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_slow.save_pretrained(self.tmpdirname )
_a = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase )
_a = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_fast.save_pretrained(self.tmpdirname )
_a = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowercase )
self.assertIsInstance(processor_fast.tokenizer , _lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowercase )
self.assertIsInstance(processor_fast.image_processor , _lowercase )
def __lowerCAmelCase ( self ) -> List[str]:
_a = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_a = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
_a = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def __lowerCAmelCase ( self ) -> str:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_a = self.prepare_image_inputs()
_a = image_processor(_lowercase , return_tensors="np" )
_a = processor(images=_lowercase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_a = """lower newer"""
_a = processor(text=_lowercase )
_a = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> str:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_a = """lower newer"""
_a = self.prepare_image_inputs()
_a = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.batch_decode(_lowercase )
_a = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def __lowerCAmelCase ( self ) -> str:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_a = """lower newer"""
_a = self.prepare_image_inputs()
_a = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
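# A hedged end-to-end sketch of the processor the tests above construct, using
# the public "openai/clip-vit-base-patch32" checkpoint instead of the tiny
# local fixtures; fetching it needs network access, so it is wrapped in a
# function and not run at import time.
def _clip_processor_sketch():
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    return sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']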
| 721 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
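# Quick illustration of the special-token layout implemented above, assuming
# the public "RUCAIBox/mvp" checkpoint; the fast class here is bound to the
# obfuscated name `A`, so the sketch uses the slow tokenizer imported at the
# top, which shares the same layout. Wrapped in a function (needs network).
def _mvp_layout_sketch():
    tok = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
    pair = tok.build_inputs_with_special_tokens([5], [7])
    # single sequence: <s> A </s>; pair: <s> A </s></s> B </s>; type ids all 0
    return pair == [tok.bos_token_id, 5, tok.eos_token_id, tok.eos_token_id, 7, tok.eos_token_id]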
| 691 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__snake_case : List[str] = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class A ( _lowercase ):
__UpperCAmelCase : Tuple = '''albert'''
def __init__( self , snake_case_=3_0_0_0_0 , snake_case_=1_2_8 , snake_case_=4_0_9_6 , snake_case_=1_2 , snake_case_=1 , snake_case_=6_4 , snake_case_=1_6_3_8_4 , snake_case_=1 , snake_case_="gelu_new" , snake_case_=0 , snake_case_=0 , snake_case_=5_1_2 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-1_2 , snake_case_=0.1 , snake_case_="absolute" , snake_case_=0 , snake_case_=2 , snake_case_=3 , **snake_case_ , ) -> Any:
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
_a = vocab_size
_a = embedding_size
_a = hidden_size
_a = num_hidden_layers
_a = num_hidden_groups
_a = num_attention_heads
_a = inner_group_num
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = classifier_dropout_prob
_a = position_embedding_type
class A ( _lowercase ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
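# Quick check of the defaults above, which mirror albert-xxlarge-v2: twelve
# layers all share one parameter group, ALBERT's cross-layer sharing trick.
# Uses the public transformers class, since the one above is bound to the
# obfuscated name `A`.
def _albert_config_sketch():
    from transformers import AlbertConfig
    config = AlbertConfig()
    return (config.hidden_size, config.num_hidden_layers, config.num_hidden_groups)  # (4096, 12, 1)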
| 700 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
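# Hedged sketch of the language-code handling the tests above cover, using the
# real "facebook/m2m100_418M" checkpoint; wrapped in a function because it
# needs network access when called.
def _mam_aaa_lang_sketch():
    tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    enc = tok("A test", return_tensors="pt")
    # the source text is prefixed with the __en__ language code id
    return enc.input_ids[0][0].item() == tok.get_lang_id("en")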
| 691 | 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _lowercase ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__SCREAMING_SNAKE_CASE ):
requests.request("GET", "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET", "https://huggingface.co", timeout=1.0 )
@pytest.mark.integration
def _lowercase ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET", "https://huggingface.co" )
def _lowercase ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__SCREAMING_SNAKE_CASE ):
http_head("https://huggingface.co" )
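# The `offline()` helper above patches the network layer; a minimal hand-rolled
# version of its "connection fails" mode looks roughly like this (illustrative,
# not the datasets implementation):
def _offline_sketch():
    from unittest.mock import patch
    with patch("requests.Session.request", side_effect=requests.exceptions.ConnectionError("offline") ):
        try:
            requests.get("https://huggingface.co" )
        except requests.exceptions.ConnectionError:
            return True
    return False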
| 701 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
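# The property above multiplies the conv strides together; for the default
# stack (5, 2, 2, 2, 2, 2, 2) that product is 320, i.e. one output frame per
# 320 raw audio samples (20 ms at 16 kHz). Self-contained check:
assert functools.reduce(operator.mul , (5, 2, 2, 2, 2, 2, 2) , 1 ) == 3_2_0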
| 691 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : List[Any] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
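# `_LazyModule` above replaces this module in sys.modules and defers the heavy
# torch/tf/flax imports until an attribute is first touched. The core of the
# pattern, stripped down for illustration (not the transformers implementation):
import importlib
import types
class _LazySketch(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._map = attr_to_module
    def __getattr__(self, attr):  # runs only when a normal lookup misses
        module = importlib.import_module(self._map[attr])
        return getattr(module, attr)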
| 702 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number | (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
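# Note: every helper above is bound to the same obfuscated name `_lowercase`,
# so after import only the last definition (a mask-based bit test) survives.
# Quick checks of that survivor on number = 0b1010 (= 10):
assert _lowercase(0b1010, 1) == 1  # bit 1 of 10 is set
assert _lowercase(0b1010, 2) == 0  # bit 2 of 10 is clear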
| 691 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case : List[str] = 16
__snake_case : Any = 32
def _lowercase ( lowerCamelCase__ : Accelerator, lowerCamelCase__ : int = 16, lowerCamelCase__ : str = "bert-base-cased" ):
_a = AutoTokenizer.from_pretrained(snake_case_ )
_a = load_dataset("glue", "mrpc" )
def tokenize_function(lowerCamelCase__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
_a = tokenizer(examples["sentence1"], examples["sentence2"], truncation=snake_case_, max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_a = datasets.map(
snake_case_, batched=snake_case_, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a = tokenized_datasets.rename_column("label", "labels" )
def collate_fn(lowerCamelCase__ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_, padding="max_length", max_length=128, return_tensors="pt" )
return tokenizer.pad(snake_case_, padding="longest", return_tensors="pt" )
# Instantiate dataloaders.
_a = DataLoader(
tokenized_datasets["train"], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ )
_a = DataLoader(
tokenized_datasets["validation"], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : int ):
# Initialize accelerator
_a = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a = config["lr"]
_a = int(config["num_epochs"] )
_a = int(config["seed"] )
_a = int(config["batch_size"] )
_a = args.model_name_or_path
set_seed(snake_case_ )
_a = get_dataloaders(snake_case_, snake_case_, snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a = AutoModelForSequenceClassification.from_pretrained(snake_case_, return_dict=snake_case_ )
# Instantiate optimizer
_a = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_a = optimizer_cls(params=model.parameters(), lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
_a = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_a = 1
_a = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_a = get_linear_schedule_with_warmup(
optimizer=snake_case_, num_warmup_steps=0, num_training_steps=snake_case_, )
else:
_a = DummyScheduler(snake_case_, total_num_steps=snake_case_, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a = accelerator.prepare(
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ )
# We need to keep track of how many total steps we have iterated over
_a = 0
# We also need to keep track of the stating epoch so files are named properly
_a = 0
# Now we train the model
_a = evaluate.load("glue", "mrpc" )
_a = 0
_a = {}
for epoch in range(snake_case_, snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
_a = model(**snake_case_ )
_a = outputs.loss
_a = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_a = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a = model(**snake_case_ )
_a = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_a = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
_a = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_, references=snake_case_, )
_a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''', snake_case_ )
_a = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
_a = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, "all_results.json" ), "w" ) as f:
json.dump(snake_case_, snake_case_ )
def _lowercase ( ):
    _a = argparse.ArgumentParser(description="Simple example of a DeepSpeed training script with performance tracking." )
parser.add_argument(
"--model_name_or_path", type=snake_case_, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=snake_case_, )
parser.add_argument(
"--output_dir", type=snake_case_, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--performance_lower_bound", type=snake_case_, default=snake_case_, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", )
parser.add_argument(
"--num_epochs", type=snake_case_, default=3, help="Number of train epochs.", )
_a = parser.parse_args()
_a = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(snake_case_, snake_case_ )
if __name__ == "__main__":
main()
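# Self-contained illustration of the accumulation rule in the loop above
# (plain torch, no Accelerate): the loss is scaled by 1/steps and the
# optimizer only steps every `steps` micro-batches, mirroring the
# `step % gradient_accumulation_steps == 0` condition used above.
def _accumulation_sketch(steps=4):
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    for step in range(2 * steps):
        loss = model(torch.randn(2, 4)).mean() / steps
        loss.backward()
        if step % steps == 0:
            optimizer.step()
            optimizer.zero_grad()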
| 703 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__snake_case : List[Any] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ):
_a = True
while ask_again:
_a = input(lowerCamelCase__ )
try:
if default is not None and len(lowerCamelCase__ ) == 0:
return default
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ):
_a = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
_a = menu.run(default_choice=lowerCamelCase__ )
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _lowercase ( lowerCamelCase__ : Dict ):
_a = int(lowerCamelCase__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = int(lowerCamelCase__ )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _lowercase ( lowerCamelCase__ : str ):
return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_a = usage.replace("<command> [<args>] " , "" )
return usage
| 691 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = BlipImageProcessor()
_a = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
_a = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
_a = InstructBlipProcessor(snake_case_ , snake_case_ , snake_case_ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).tokenizer
def __lowerCAmelCase ( self , **snake_case_ ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor
def __lowerCAmelCase ( self , **snake_case_ ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).qformer_tokenizer
def __lowerCAmelCase ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
_a = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self ) -> Tuple:
_a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_a = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
_a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
self.assertIsInstance(processor.qformer_tokenizer , snake_case_ )
def __lowerCAmelCase ( self ) -> int:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = self.get_qformer_tokenizer()
_a = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
_a = self.prepare_image_inputs()
_a = image_processor(snake_case_ , return_tensors="np" )
_a = processor(images=snake_case_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = self.get_qformer_tokenizer()
_a = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
_a = "lower newer"
_a = processor(text=snake_case_ )
_a = tokenizer(snake_case_ , return_token_type_ids=snake_case_ )
_a = qformer_tokenizer(snake_case_ , return_token_type_ids=snake_case_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def __lowerCAmelCase ( self ) -> int:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = self.get_qformer_tokenizer()
_a = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
_a = "lower newer"
_a = self.prepare_image_inputs()
_a = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = self.get_qformer_tokenizer()
_a = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.batch_decode(snake_case_ )
_a = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = self.get_qformer_tokenizer()
_a = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
_a = "lower newer"
_a = self.prepare_image_inputs()
_a = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
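# Note on the contract exercised above: InstructBlipProcessor fans a single
# text input out to two tokenizers, exposing the Q-Former copy under
# "qformer_"-prefixed keys ("qformer_input_ids", "qformer_attention_mask")
# alongside the usual "input_ids"/"attention_mask" and the image "pixel_values".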
| 704 |
'''simple docstring'''
def simplify ( lowerCamelCase__ : list[list] ):
_a = current_set.copy()
for row_index, row in enumerate(lowerCamelCase__ ):
_a = row[0]
for column_index, column in enumerate(lowerCamelCase__ ):
if magnitude == 0:
_a = column
continue
_a = column / magnitude
# Subtract to cancel term
_a = current_set[0]
_a = [first_row]
_a = current_set[1::]
for row in current_set:
_a = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCamelCase__ )
continue
for column_index in range(len(lowerCamelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a = final_set[0]
_a = []
_a = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a = simplify(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, lowerCamelCase__ )
_a = resultant
return final_set
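# Hand-checked example of one elimination pass performed by simplify():
#   simplify([[2, 1, 3], [1, 2, 3]]) -> [[1.0, 0.5, 1.5], [0.0, -1.5, -1.5]]
# Each row is scaled so its leading coefficient is 1, then subtracted from the
# first row to zero out the leading column before the function recurses on the
# remaining columns.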
def solve_simultaneous ( lowerCamelCase__ : list[list] ):
if len(lowerCamelCase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
_a = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
_a = equations.copy()
if any(0 in row for row in data_set ):
_a = data_set.copy()
_a = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
_a = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, lowerCamelCase__ )
_a = data_set.copy()
_a = simplify(lowerCamelCase__ )
_a = simplified[::-1]
_a = []
for row in simplified:
_a = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
_a = temp_row[1::]
_a = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
_a = []
for item in solutions:
final.append(float(round(lowerCamelCase__, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
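    # Expected output, derived by hand rather than by running the snippet: every
    # equation above reads x_i + (x_1 + ... + x_5) = 3 + i, so the variables sum
    # to 5 and x = (-1, 0, 1, 2, 3); the second call solves 4x = 2 directly.
    #   [-1.0, 0.0, 1.0, 2.0, 3.0]
    #   [0.5]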
| 691 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType ( ExplicitEnum ):
    CHARACTER = """char"""
    BPE = """bpe"""
    WORDPIECE = """wp"""
__snake_case : Optional[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A ( ProcessorMixin ):
    attributes = ["""image_processor""", """char_tokenizer"""]
    image_processor_class = """ViTImageProcessor"""
    char_tokenizer_class = """MgpstrTokenizer"""
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ) -> int:
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
_a = tokenizer
_a = AutoTokenizer.from_pretrained("gpt2" )
_a = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(_a , _a )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ) -> Optional[int]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
_a = self.image_processor(_a , return_tensors=_a , **_a )
if text is not None:
_a = self.char_tokenizer(_a , return_tensors=_a , **_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
_a = encodings["""input_ids"""]
return inputs
def __lowerCAmelCase ( self , snake_case_ ) -> str:
        _a , _a , _a = sequences
_a = char_preds.size(0 )
_a = self._decode_helper(_a , "char" )
_a = self._decode_helper(_a , "bpe" )
_a = self._decode_helper(_a , "wp" )
_a = []
_a = []
for i in range(_a ):
_a = [char_scores[i], bpe_scores[i], wp_scores[i]]
_a = [char_strs[i], bpe_strs[i], wp_strs[i]]
_a = scores.index(max(_a ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_a = {}
_a = final_strs
_a = final_scores
_a = char_strs
_a = bpe_strs
_a = wp_strs
return out
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[Any]:
if format == DecodeType.CHARACTER:
_a = self.char_decode
_a = 1
_a = """[s]"""
elif format == DecodeType.BPE:
_a = self.bpe_decode
_a = 2
_a = """#"""
elif format == DecodeType.WORDPIECE:
_a = self.wp_decode
_a = 1_0_2
_a = """[SEP]"""
else:
raise ValueError(F'''Format {format} is not supported.''' )
        _a , _a = [], []
_a = pred_logits.size(0 )
_a = pred_logits.size(1 )
_a = pred_logits.topk(1 , dim=-1 , largest=_a , sorted=_a )
_a = preds_index.view(-1 , _a )[:, 1:]
_a = decoder(_a )
_a = torch.nn.functional.softmax(_a , dim=2 ).max(dim=2 )
_a = preds_max_prob[:, 1:]
for index in range(_a ):
_a = preds_str[index].find(_a )
_a = preds_str[index][:pred_eos]
_a = preds_index[index].cpu().tolist()
_a = pred_index.index(_a ) if eos_token in pred_index else -1
_a = preds_max_prob[index][: pred_eos_index + 1]
_a = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_a )
conf_scores.append(_a )
return dec_strs, conf_scores
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(_a )]
return decode_strs
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
return self.bpe_tokenizer.batch_decode(_a )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(_a )]
return decode_strs
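# Tiny illustration of the head-selection rule in the decode method above,
# using made-up confidence scores:
#   scores = [0.91, 0.85, 0.88]                 # char, bpe, wp confidences
#   strs = ["ticket", "tickct", "ticket"]
#   strs[scores.index(max(scores))]             # -> "ticket" (char head wins)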
| 705 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing
return x.sum()
def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing
return i + 1
@dataclass
class A :
    x : int
    y : str
class A ( a ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
            map_nested(lambda snake_case_ : snake_case_ + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
        class Foo :
            my_attr = """bar"""
_a = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ):
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
_a = {F'''{i}''': i for i in range(lowerCamelCase__ )}
        _a = map_nested(lambda lowerCamelCase__ : lowerCamelCase__ + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
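# The parametrize table above encodes map_nested's dispatch rule: the
# multiprocessing pool is used only when num_proc > 1 AND the iterable holds at
# least parallel_min_length (16) items; otherwise the single-process path runs.
# The effective worker count is capped at len(iterable), hence (17, 16, 16).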
class A ( a ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( lowerCamelCase__ : Any ):
_a = NestedDataStructure(lowerCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ):
_a = NestedDataStructure(lowerCamelCase__ ).flatten()
assert output == expected_output
def _lowercase ( ):
_a = A(x=1, y="foobar" )
_a = {"x": 1, "y": "foobar"}
assert asdict(lowerCamelCase__ ) == expected_output
_a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
_a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(lowerCamelCase__ ) == expected_output
with pytest.raises(lowerCamelCase__ ):
asdict([1, A(x=10, y="foo" )] )
def _lowercase ( lowerCamelCase__ : str ):
return text.split()
def _lowercase ( lowerCamelCase__ : List[Any] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowercase ( ):
with Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_a = []
for yield_time, content in iflatmap_unordered(
lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
out.append(lowerCamelCase__ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(lowerCamelCase__ ) == 4
| 691 | 0 |
'''simple docstring'''
def factorial ( lowerCamelCase__ : int ):
    _a = 1
    for i in range(1, lowerCamelCase__ + 1 ):
        _a *= i
    return _a
def split_and_add ( lowerCamelCase__ : int ):
    _a = 0
    while lowerCamelCase__ > 0:
        _a += lowerCamelCase__ % 10  # add the last digit
        lowerCamelCase__ = lowerCamelCase__ // 10  # remove the last digit from the given number
    return _a
def solution ( lowerCamelCase__ : int = 100 ):
    _a = factorial(lowerCamelCase__ )
    _a = split_and_add(_a )
    return _a
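# Worked check of the pipeline above: solution(10) computes 10! = 3628800,
# whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27; the default input of 100
# yields 648, the well-known answer to Project Euler problem 20.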
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 706 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index
            encapsulated by [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`].
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with a distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""
@add_start_docstrings(a )
class A ( a ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
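# Hedged construction sketch of the intended contract (the attribute wiring in
# __init__ was stripped by the renaming, so this describes the design rather
# than the snippet as written; the sub-config payloads are placeholders):
#   cfg = A(question_encoder={"model_type": "dpr"}, generator={"model_type": "bart"})
# Both dicts must carry a "model_type", which AutoConfig.for_model resolves
# into concrete question-encoder / generator config objects.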
| 691 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : List[str] = StableDiffusionDiffEditPipeline
__UpperCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
__UpperCAmelCase : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : Optional[int] = frozenset([] )
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
_a = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCAmelCase , set_alpha_to_zero=__lowerCAmelCase , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
_a = CLIPTextModel(__lowerCAmelCase )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Tuple:
_a = floats_tensor((1, 1_6, 1_6) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_a = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith("mps" ):
_a = torch.manual_seed(__lowerCAmelCase )
else:
_a = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_a = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Any:
_a = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" )
if str(__lowerCAmelCase ).startswith("mps" ):
_a = torch.manual_seed(__lowerCAmelCase )
else:
_a = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_a = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Optional[Any]:
_a = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" )
if str(__lowerCAmelCase ).startswith("mps" ):
_a = torch.manual_seed(__lowerCAmelCase )
else:
_a = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_a = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def __lowerCAmelCase ( self ) -> Tuple:
if not hasattr(self.pipeline_class , "_optional_components" ):
return
_a = self.get_dummy_components()
_a = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_a = self.get_dummy_inputs(__lowerCAmelCase )
_a = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
_a = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
_a = self.get_dummy_inputs(__lowerCAmelCase )
_a = pipe_loaded(**__lowerCAmelCase )[0]
_a = np.abs(output - output_loaded ).max()
self.assertLess(__lowerCAmelCase , 1E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "cpu"
_a = self.get_dummy_components()
_a = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_a = self.get_dummy_mask_inputs(__lowerCAmelCase )
_a = pipe.generate_mask(**__lowerCAmelCase )
_a = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 1_6, 1_6) )
_a = np.array([0] * 9 )
_a = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __lowerCAmelCase ( self ) -> str:
_a = "cpu"
_a = self.get_dummy_components()
_a = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_a = self.get_dummy_inversion_inputs(__lowerCAmelCase )
_a = pipe.invert(**__lowerCAmelCase ).images
_a = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_a = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
_a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
def __lowerCAmelCase ( self ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "cpu"
_a = self.get_dummy_components()
_a = {"beta_start": 0.00_085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
_a = DPMSolverMultistepScheduler(**__lowerCAmelCase )
_a = DPMSolverMultistepInverseScheduler(**__lowerCAmelCase )
_a = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_a = self.get_dummy_inversion_inputs(__lowerCAmelCase )
_a = pipe.invert(**__lowerCAmelCase ).images
_a = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_a = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
_a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
_a = raw_image.convert("RGB" ).resize((7_6_8, 7_6_8) )
_a = raw_image
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = torch.manual_seed(0 )
_a = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_a = DDIMScheduler.from_config(pipe.scheduler.config )
_a = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_a = "a bowl of fruit"
_a = "a bowl of pears"
_a = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCAmelCase , target_prompt=__lowerCAmelCase , generator=__lowerCAmelCase , )
_a = pipe.invert(
prompt=__lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCAmelCase ).latents
_a = pipe(
prompt=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_latents=__lowerCAmelCase , generator=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
_a = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __lowerCAmelCase ( self ) -> List[Any]:
_a = torch.manual_seed(0 )
_a = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_a = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_a = "a bowl of fruit"
_a = "a bowl of pears"
_a = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCAmelCase , target_prompt=__lowerCAmelCase , generator=__lowerCAmelCase , )
_a = pipe.invert(
prompt=__lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCAmelCase , num_inference_steps=2_5 , ).latents
_a = pipe(
prompt=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_latents=__lowerCAmelCase , generator=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type="numpy" , ).images[0]
_a = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
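# The two slow tests above walk the full DiffEdit loop in three stages:
#   1. generate_mask(image, source_prompt, target_prompt) -> where to edit,
#   2. invert(prompt, image, inpaint_strength=0.7) -> partially inverted latents,
#   3. pipe(prompt, mask_image, image_latents) -> the edited image, compared to
#      a reference picture within an elementwise tolerance of 5e-1.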
| 707 |
'''simple docstring'''
class A :
def __init__( self ) -> List[str]:
_a = 0
_a = 0
_a = {}
def __lowerCAmelCase ( self , snake_case_ ) -> int:
if vertex not in self.adjacency:
_a = {}
self.num_vertices += 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
_a = weight
_a = weight
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
_a = list(edges[i] )
        edges.sort(key=lambda snake_case_ : snake_case_[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a = edges[i][2] + 1
for edge in edges:
_a , _a , _a = edge
_a = weight
_a = weight
def __str__( self ) -> Optional[int]:
_a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCAmelCase ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any:
_a = Graph()
if vertices is None:
_a = []
if edges is None:
_a = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class A :
def __init__( self ) -> Optional[int]:
_a = {}
_a = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
if item in self.parent:
return self.find(snake_case_ )
_a = item
_a = 0
return item
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
_a = self.find(self.parent[item] )
return self.parent[item]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = self.find(snake_case_ )
_a = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a = roota
return roota
return None
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = graph.num_vertices
_a = Graph.UnionFind()
_a = []
while num_components > 1:
_a = {}
for vertex in graph.get_vertices():
_a = -1
_a = graph.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a = edge
_a = union_find.find(snake_case_ )
_a = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
_a = num_components - 1
_a = Graph.build(edges=snake_case_ )
return mst
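# How the loop above realizes Boruvka's algorithm: each round scans all edges
# to record, per union-find component, its cheapest outgoing edge (cheap_edge),
# merges the two components every such edge touches, and appends the edge to
# the MST; each round at least halves the component count, so O(log V) rounds
# suffice before a single component remains.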
| 691 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case : List[Any] = logging.get_logger(__name__)
class A ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PIL.Image.BICUBIC , snake_case_ = True , snake_case_ = None , snake_case_ = 1 / 2_5_5 , snake_case_ = True , snake_case_ = True , snake_case_ = None , snake_case_ = None , **snake_case_ , ) -> Any:
super().__init__(**lowercase_ )
_a = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
_a = get_size_dict(lowercase_ )
_a = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
_a = get_size_dict(lowercase_ , param_name="crop_size" )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ = PIL.Image.BICUBIC , snake_case_ = None , **snake_case_ , ) -> str:
_a = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
lowercase_ , size=(size["height"], size["width"]) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> Dict:
_a = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> Union[str, Any]:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> int:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_=None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ) -> Any:
_a = do_resize if do_resize is not None else self.do_resize
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowercase_ )
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowercase_ , param_name="crop_size" )
_a = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
_a = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
_a = {"""pixel_values""": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
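# Hedged usage sketch (assuming the long method above is wired up as the
# standard `preprocess` entry point of BaseImageProcessor, as its signature
# suggests):
#   from PIL import Image
#   proc = A()                                   # defaults: 256 resize, 224 crop
#   out = proc.preprocess(Image.new("RGB", (512, 512)), return_tensors="np")
#   out["pixel_values"].shape                    # -> (1, 3, 224, 224)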
| 708 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_a = bytes(lowerCamelCase__, "utf-8" )
with zstd.open(lowerCamelCase__, "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture
def _lowercase ( lowerCamelCase__ : int ):
with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f:
f.write(lowerCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 | 0 |
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : int ):
_a = word.split()
def justify(lowerCamelCase__ : list, lowerCamelCase__ : int, lowerCamelCase__ : int ) -> str:
_a = max_width - width
_a = len(_lowercase )
if len(_lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
_a = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_a = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_a = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_lowercase ):
num_spaces_between_words_list[i] += 1
_a = []
for i in range(_lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_lowercase )
_a = []
_a = []
_a = 0
for word in words:
if width + len(_lowercase ) + len(_lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_lowercase )
width += len(_lowercase )
else:
# justify the line and add it to result
answer.append(justify(_lowercase, _lowercase, _lowercase ) )
# reset new line and new width
_a = [word], len(_lowercase )
_a = max_width - width - len(_lowercase )
answer.append(" ".join(_lowercase ) + (remaining_spaces + 1) * " " )
return answer
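# Hand-checked example of the greedy fill plus full-justify pass above (the
# original signature is assumed to have been text_justification(text, max_width)):
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']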
if __name__ == "__main__":
from doctest import testmod
testmod()
| 709 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, F'''checkpoints/{model_name}.pth''')

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.5_79_89_02_51_15_96_68

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.97_12_60_30_92_19_36_04

        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.86_86_01_56_05_92_65_14

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Repo id on the Hugging Face Hub that hosts the original SAM checkpoints.",
    )
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 691 | 0 |
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
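    # e.g. bin_to_octal("1111") == "17" (padded to "001111" -> groups "001", "111").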
| 710 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
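# e.g. betas_for_alpha_bar(1000) yields the 1000-step "squaredcos_cap_v2"
# (Glide cosine) beta schedule consumed by KDPM2DiscreteScheduler.__init__ below.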
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.00_085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''')

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep,
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''')

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output,
        timestep,
        sample,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''')

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples,
        noise,
        timesteps,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self) -> int:
        return self.config.num_train_timesteps
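# Minimal usage sketch (assumed API, consistent with other diffusers schedulers):
#     scheduler = KDPM2DiscreteScheduler()
#     scheduler.set_timesteps(num_inference_steps=25)
#     scaled = scheduler.scale_model_input(sample, scheduler.timesteps[0])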
| 691 | 0 |
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
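    # e.g. sum_of_divisors(6) == 6 (1 + 2 + 3), so 6 is a perfect number.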
| 711 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
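# Example: a triangle (K3) can be colored with 3 colors.
#     color([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 3)  # -> [0, 1, 2]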
| 691 | 0 |
'''simple docstring'''
def solution(n: int = 600_851_475_143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
if __name__ == "__main__":
print(f'''{solution() = }''')
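    # e.g. solution(13195) == 29, since 13195 = 5 * 7 * 13 * 29.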
| 712 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
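    # Example (hand-checked): build a weighted triangle and take its MST.
    #     g = [Vertex(x) for x in range(3)]  # ids "0", "1", "2"
    #     connect(g, 1, 2, 1); connect(g, 2, 3, 2); connect(g, 1, 3, 4)
    #     prim(g, g[0])  # -> [(2, 1), (3, 2)]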
| 691 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
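# Usage sketch (assumed): given a (nested) GPT-2-style param dict `params`,
#     partition_spec = set_partitions(params)
# maps each flattened parameter key to a PartitionSpec, or None for replicated leaves.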
| 713 |
'''simple docstring'''
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 691 | 0 |
'''simple docstring'''
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28_433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
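    # solution(10) == "8739992577", the last ten digits of 28433 * 2**7830457 + 1.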
| 714 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''')

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'''Unsupported model: {model_name}''')

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'''{name} was ignored''')
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''')
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'''Unknown model name: {model_name}''')

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
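    # Example invocation (hypothetical local paths):
    #   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
    #       --checkpoint_path ./encodec_24khz-d7cc33bc.th --pytorch_dump_folder_path ./encodec_24khz_hf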
| 691 | 0 |
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
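    # e.g. solution(10) == 2520, the smallest number divisible by each of 1..10.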
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=2_560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10_240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1E-0_5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[int, str]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 716 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=DummyObject ):
    _backends = ["torch", "scipy"]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
| 717 |
'''simple docstring'''
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 718 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 691 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__snake_case : List[str] = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
_a = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
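# Illustrative sketch (added for reference; the label names below are an
# assumption, the real ones come from the dataset's "complexity" column at
# runtime): the `ClassLabel` feature built above provides the string <-> int
# mapping that the tokenize step relies on.
#
#   from datasets import ClassLabel
#   labels = ClassLabel(num_classes=7, names=["constant", "linear", "quadratic",
#                                             "cubic", "logn", "nlogn", "np"])
#   labels.str2int("linear")  # -> 1
#   labels.int2str(1)         # -> "linear"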
| 691 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( __lowercase , unittest.TestCase ):
__UpperCAmelCase : List[Any] = GPTSanJapaneseTokenizer
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Optional[int] = {'''do_clean_text''': False, '''add_prefix_space''': False}
def __lowerCAmelCase ( self ) -> int:
super().setUp()
# fmt: off
_a = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
_a = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
_a = {"""unk_token""": """<unk>"""}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(__a ) )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__a )
def __lowerCAmelCase ( self , snake_case_ ) -> int:
_a = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
_a = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , snake_case_ ) -> str:
_a = self.get_input_output_texts(__a )
_a = tokenizer.encode(__a , add_special_tokens=__a )
_a = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
return text, ids
def __lowerCAmelCase ( self ) -> Tuple:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Any:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> str:
_a = self.get_tokenizer()
# Testing tokenization
_a = """こんにちは、世界。 こんばんは、㔺界。"""
_a = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
_a = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_a = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_a = tokens + [tokenizer.unk_token]
_a = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , __a )
def __lowerCAmelCase ( self ) -> Any:
_a = self.get_tokenizer()
# Testing tokenization
_a = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
_a = """こんにちは、、、、世界。こんばんは、、、、世界。"""
_a = tokenizer.encode(__a )
_a = tokenizer.decode(__a )
self.assertEqual(__a , __a )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_a = """こんにちは、世界。"""
_a = """こんばんは、㔺界。😀"""
_a = """こんにちは、世界。こんばんは、世界。😀"""
_a = tokenizer.encode(prefix_text + input_text )
_a = tokenizer.encode("" , prefix_text=prefix_text + input_text )
_a = tokenizer.encode(__a , prefix_text=__a )
_a = tokenizer.decode(__a )
_a = tokenizer.decode(__a )
_a = tokenizer.decode(__a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
@slow
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_a = """こんにちは、世界。"""
_a = """こんばんは、㔺界。😀"""
_a = len(tokenizer.encode(__a ) ) - 2
_a = len(tokenizer.encode(__a ) ) - 2
_a = [1] + [0] * (len_prefix + len_text + 1)
_a = [1] * (len_prefix + len_text + 1) + [0]
_a = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_a = tokenizer(prefix_text + input_text ).token_type_ids
_a = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
_a = tokenizer(__a , prefix_text=__a ).token_type_ids
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_a = tokenizer.encode("あンいワ" )
_a = tokenizer.encode("" , prefix_text="あンいワ" )
_a = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
self.assertNotEqual(__a , __a )
self.assertNotEqual(__a , __a )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_a = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
_a = tokenizer(__a , padding=__a )
_a = tokenizer.batch_encode_plus(__a , padding=__a )
# fmt: off
_a = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
_a = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_a = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __a )
self.assertListEqual(x_token.token_type_ids , __a )
self.assertListEqual(x_token.attention_mask , __a )
self.assertListEqual(x_token_a.input_ids , __a )
self.assertListEqual(x_token_a.token_type_ids , __a )
self.assertListEqual(x_token_a.attention_mask , __a )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# tokenizer has no padding token
pass
| 720 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the one reported in the paper, since the original researchers did not use `sacrebleu` and measured the score on tokenized outputs; the `transformers` score here was measured with `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
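# Resulting layout (illustrative): for each of the three checkpoints the loop
# above writes
#
#   <repo_root>/model_cards/allenai/<model_name>/README.md
#
# with the card template filled in for the en-de pair and that model's BLEU
# scores.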
| 691 | 0 |
'''simple docstring'''
import numpy as np
def _lowercase ( lowerCamelCase__ : np.array ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
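# Usage sketch (illustrative values) for the function above: it is vectorised
# through numpy, maps 0.0 to 0.5, and squashes any real input into (0, 1):
#
#   _lowercase(np.array([0.0]))        # -> array([0.5])
#   _lowercase(np.array([-1.0, 1.0]))  # -> array([0.26894142, 0.73105858])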
| 721 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
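# Illustrative note (assuming the usual RoBERTa/BART special-token ids, bos=0
# and eos=2): the last two methods above (build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences in the upstream source) produce
#
#   single sequence: <s> A </s>             e.g. [0, 10, 11, 2]
#   sequence pair:   <s> A </s></s> B </s>  e.g. [0, 10, 11, 2, 2, 20, 2]
#
# and all-zero token type ids, since MVP, like BART, does not use them.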
| 691 | 0 |
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Any ):
_a = ""
for i in table:
res += inp[i - 1]
return res
def _lowercase ( lowerCamelCase__ : str ):
return data[1:] + data[0]
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str] ):
_a = ""
for i in range(len(lowerCamelCase__ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict ):
_a = int("0b" + data[0] + data[-1], 2 )
_a = int("0b" + data[1:3], 2 )
return bin(s[row][col] )[2:]
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict, lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any] ):
_a = message[:4]
_a = message[4:]
_a = apply_table(lowerCamelCase__, lowerCamelCase__ )
_a = xor(lowerCamelCase__, lowerCamelCase__ )
_a = apply_sbox(lowerCamelCase__, temp[:4] ) # noqa: E741
_a = apply_sbox(lowerCamelCase__, temp[4:] )
_a = "0" * (2 - len(lowerCamelCase__ )) + l # noqa: E741
_a = "0" * (2 - len(lowerCamelCase__ )) + r
_a = apply_table(l + r, lowerCamelCase__ )
_a = xor(lowerCamelCase__, lowerCamelCase__ )
return temp + right
if __name__ == "__main__":
__snake_case : Any = input("Enter 10 bit key: ")
__snake_case : Dict = input("Enter 8 bit message: ")
__snake_case : Optional[int] = [6, 3, 7, 4, 8, 5, 10, 9]
__snake_case : Any = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__snake_case : Union[str, Any] = [2, 4, 3, 1]
__snake_case : int = [2, 6, 3, 1, 4, 8, 5, 7]
__snake_case : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6]
__snake_case : Any = [4, 1, 2, 3, 2, 3, 4, 1]
__snake_case : Dict = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__snake_case : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__snake_case : Dict = apply_table(key, paa_table)
__snake_case : Dict = temp[:5]
__snake_case : str = temp[5:]
__snake_case : List[Any] = left_shift(left)
__snake_case : List[Any] = left_shift(right)
__snake_case : Tuple = apply_table(left + right, pa_table)
__snake_case : List[str] = left_shift(left)
__snake_case : Optional[int] = left_shift(right)
__snake_case : Optional[Any] = left_shift(left)
__snake_case : Union[str, Any] = left_shift(right)
__snake_case : List[str] = apply_table(left + right, pa_table)
# encryption
__snake_case : int = apply_table(message, IP)
__snake_case : Any = function(expansion, sa, sa, keya, temp)
__snake_case : List[str] = temp[4:] + temp[:4]
__snake_case : Tuple = function(expansion, sa, sa, keya, temp)
__snake_case : Optional[Any] = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
__snake_case : Tuple = apply_table(CT, IP)
__snake_case : Dict = function(expansion, sa, sa, keya, temp)
__snake_case : Dict = temp[4:] + temp[:4]
__snake_case : List[str] = function(expansion, sa, sa, keya, temp)
__snake_case : Dict = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 700 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
| 691 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Any, lowerCamelCase__ : str, lowerCamelCase__ : Any, lowerCamelCase__ : Any ):
_a = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_SCREAMING_SNAKE_CASE )] )
_a = np.array(_SCREAMING_SNAKE_CASE )
_a = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), _SCREAMING_SNAKE_CASE ) ), x.transpose() ), _SCREAMING_SNAKE_CASE )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any] ):
_a = (1, 2, 1)
_a = (1, 1, 0, 7)
_a = SARIMAX(
_SCREAMING_SNAKE_CASE, exog=_SCREAMING_SNAKE_CASE, order=_SCREAMING_SNAKE_CASE, seasonal_order=_SCREAMING_SNAKE_CASE )
_a = model.fit(disp=_SCREAMING_SNAKE_CASE, maxiter=600, method="nm" )
_a = model_fit.predict(1, len(_SCREAMING_SNAKE_CASE ), exog=[test_match] )
return result[0]
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict ):
_a = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1 )
regressor.fit(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
_a = regressor.predict(_SCREAMING_SNAKE_CASE )
return y_pred[0]
def _lowercase ( lowerCamelCase__ : int ):
train_user.sort()
_a = np.percentile(_SCREAMING_SNAKE_CASE, 25 )
_a = np.percentile(_SCREAMING_SNAKE_CASE, 75 )
_a = qa - qa
_a = qa - (iqr * 0.1)
return low_lim
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Any ):
_a = 0
_a = 0
for i in list_vote:
if i > actual_result:
_a = not_safe + 1
else:
if abs(abs(_SCREAMING_SNAKE_CASE ) - abs(_SCREAMING_SNAKE_CASE ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__snake_case : str = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__snake_case : List[str] = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
__snake_case : Optional[int] = Normalizer().fit_transform(data_input_df.values)
# split data
__snake_case : Optional[Any] = normalize_df[:, 2].tolist()
__snake_case : Dict = normalize_df[:, 0].tolist()
__snake_case : str = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__snake_case : Optional[Any] = normalize_df[:, [1, 2]].tolist()
__snake_case : Any = x[: len(x) - 1]
__snake_case : Any = x[len(x) - 1 :]
# for linear regression & sarimax
__snake_case : int = total_date[: len(total_date) - 1]
__snake_case : Union[str, Any] = total_user[: len(total_user) - 1]
__snake_case : Optional[Any] = total_match[: len(total_match) - 1]
__snake_case : str = total_date[len(total_date) - 1 :]
__snake_case : Optional[Any] = total_user[len(total_user) - 1 :]
__snake_case : int = total_match[len(total_match) - 1 :]
# voting system with forecasting
__snake_case : Union[str, Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__snake_case : Dict = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 701 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
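# Illustration (derived from the defaults above): this property, named
# inputs_to_logits_ratio in the upstream source, is the total stride of the
# feature extractor, i.e. how many raw audio samples collapse into one
# encoder frame. With conv_stride=(5, 2, 2, 2, 2, 2, 2):
#
#   5 * 2 * 2 * 2 * 2 * 2 * 2 == 320
#
# so at a 16 kHz sampling rate each frame covers 320 samples, i.e. 20 ms.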
| 691 | 0 |
'''simple docstring'''
import sys
def _lowercase ( lowerCamelCase__ : Optional[int] ):
_a = len(lowercase__ )
_a = [[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
_a = [[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
for chain_length in range(2, lowercase__ ):
for a in range(1, n - chain_length + 1 ):
_a = a + chain_length - 1
_a = sys.maxsize
for c in range(lowercase__, lowercase__ ):
_a = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_a = cost
_a = c
return matrix, sol
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
if i == j:
print("A" + str(lowercase__ ), end=" " )
else:
print("(", end=" " )
print_optiomal_solution(lowercase__, lowercase__, optimal_solution[i][j] )
print_optiomal_solution(lowercase__, optimal_solution[i][j] + 1, lowercase__ )
print(")", end=" " )
def _lowercase ( ):
_a = [30, 35, 15, 5, 10, 20, 25]
_a = len(lowercase__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_a , _a = matrix_chain_order(lowercase__ )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(lowercase__, 1, n - 1 )
if __name__ == "__main__":
main()
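# Reference result (the classic CLRS instance, added for illustration): for
# the dimension array [30, 35, 15, 5, 10, 20, 25] used in main(), the minimum
# number of scalar multiplications is 15125, obtained with the
# parenthesization ((A1(A2A3))((A4A5)A6)).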
| 702 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number | (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
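# Usage sketch (illustrative values); bit positions are counted from the
# least-significant bit, so with number = 0b1101 (13) the five helpers above
# evaluate as:
#
#   13 | (1 << 1)              == 0b1111  # set bit 1
#   13 & ~(1 << 2)             == 0b1001  # clear bit 2
#   13 ^ (1 << 0)              == 0b1100  # flip bit 0
#   (13 >> 3) & 1              == 1       # bit 3 is set
#   int((13 & (1 << 1)) != 0)  == 0       # get bit 1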
| 691 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[str], lowerCamelCase__ : str, lowerCamelCase__ : Dict ):
for attribute in key.split("." ):
_a = getattr(lowerCAmelCase_, lowerCAmelCase_ )
if weight_type is not None:
_a = getattr(lowerCAmelCase_, lowerCAmelCase_ ).shape
else:
_a = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
else:
_a = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Tuple, lowerCamelCase__ : Any ):
_a = []
_a = fairseq_model.state_dict()
_a = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, hf_model.config.feat_extract_norm == "group", )
_a = True
else:
for key, mapped_key in MAPPING.items():
_a = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
_a = True
if "*" in mapped_key:
_a = name.split(lowerCAmelCase_ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCAmelCase_ )
if "weight_g" in name:
_a = '''weight_g'''
elif "weight_v" in name:
_a = '''weight_v'''
elif "weight" in name:
_a = '''weight'''
elif "bias" in name:
_a = '''bias'''
else:
_a = None
set_recursively(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Tuple, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[Any] ):
_a = full_name.split("conv_layers." )[-1]
_a = name.split("." )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_a = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase_ )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, lowerCamelCase__ : Optional[Any]=True ):
if config_path is not None:
_a = HubertConfig.from_pretrained(lowerCAmelCase_ )
else:
_a = HubertConfig()
if is_finetuned:
if dict_path:
_a = Dictionary.load(lowerCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a = target_dict.pad_index
_a = target_dict.bos_index
_a = target_dict.eos_index
_a = len(target_dict.symbols )
_a = os.path.join(lowerCAmelCase_, "vocab.json" )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCAmelCase_ ) )
return
os.makedirs(lowerCAmelCase_, exist_ok=lowerCAmelCase_ )
with open(lowerCAmelCase_, "w", encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices, lowerCAmelCase_ )
_a = WavaVecaCTCTokenizer(
lowerCAmelCase_, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=lowerCAmelCase_, )
_a = True if config.feat_extract_norm == '''layer''' else False
_a = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=lowerCAmelCase_, return_attention_mask=lowerCAmelCase_, )
_a = WavaVecaProcessor(feature_extractor=lowerCAmelCase_, tokenizer=lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
_a = HubertForCTC(lowerCAmelCase_ )
else:
_a = HubertModel(lowerCAmelCase_ )
if is_finetuned:
_a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a = model[0].eval()
recursively_load_weights(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__snake_case : List[str] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
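# Example invocation (illustrative file names, not taken from the original):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned
#
# For a fine-tuned CTC checkpoint, drop --not_finetuned and pass --dict_path
# as well, so that a tokenizer and processor are written alongside the model
# weights.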
| 703 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    # Keep prompting until the input converts cleanly (or the default is taken).
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
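# An illustrative sketch of how these prompt helpers compose into an interactive
# config flow. The prompt strings below are made up for the example; the real
# `accelerate config` command drives the same helpers with its own questions.
compute_environment = _ask_options(
    "In which compute environment are you running?",
    ["This machine", "AWS (Amazon SageMaker)"],
    _convert_compute_environment,
)
use_cpu = _ask_field(
    "Do you want to run your training on CPU only? [yes/NO]: ",
    _convert_yes_no_to_bool,
    default=False,
    error_message="Please enter yes or no.",
)
print(compute_environment, use_cpu)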
| 691 | 0 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 704 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a "unit" matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Move a row with no zero entries to the front so elimination can start
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last simplified row upwards
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
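# A quick sanity check of the solver against NumPy's linear solver; this assumes
# NumPy is installed and is not part of the original module.
import numpy as np

eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
a = np.array([row[:-1] for row in eq], dtype=float)  # coefficient matrix
b = np.array([row[-1] for row in eq], dtype=float)   # constants column
print(np.linalg.solve(a, b))  # should match solve_simultaneous(eq) up to rounding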
| 691 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> Dict:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1_0_0_0 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_a = CLIPTextModel(__snake_case )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Any:
_a = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
_a = image / 2 + 0.5
if str(__snake_case ).startswith("mps" ):
_a = torch.manual_seed(__snake_case )
else:
_a = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
_a = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = CycleDiffusionPipeline(**__snake_case )
_a = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_a = self.get_dummy_inputs(__snake_case )
_a = pipe(**__snake_case )
_a = output.images
_a = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
_a = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.get_dummy_components()
for name, module in components.items():
if hasattr(__snake_case , "half" ):
_a = module.half()
_a = CycleDiffusionPipeline(**__snake_case )
_a = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_a = self.get_dummy_inputs(__snake_case )
_a = pipe(**__snake_case )
_a = output.images
_a = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
_a = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCAmelCase ( self ) -> Any:
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def __lowerCAmelCase ( self ) -> Any:
return super().test_inference_batch_single_identical()
@skip_mps
def __lowerCAmelCase ( self ) -> Optional[Any]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowerCAmelCase ( self ) -> List[str]:
return super().test_save_load_optional_components()
@skip_mps
def __lowerCAmelCase ( self ) -> Tuple:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> List[str]:
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
_a = init_image.resize((5_1_2, 5_1_2) )
_a = '''CompVis/stable-diffusion-v1-4'''
_a = DDIMScheduler.from_pretrained(__snake_case , subfolder="scheduler" )
_a = CycleDiffusionPipeline.from_pretrained(
__snake_case , scheduler=__snake_case , safety_checker=__snake_case , torch_dtype=torch.floataa , revision="fp16" )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
_a = '''A black colored car'''
_a = '''A blue colored car'''
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__snake_case , source_prompt=__snake_case , image=__snake_case , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__snake_case , output_type="np" , )
_a = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
_a = init_image.resize((5_1_2, 5_1_2) )
_a = '''CompVis/stable-diffusion-v1-4'''
_a = DDIMScheduler.from_pretrained(__snake_case , subfolder="scheduler" )
_a = CycleDiffusionPipeline.from_pretrained(__snake_case , scheduler=__snake_case , safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
_a = '''A black colored car'''
_a = '''A blue colored car'''
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__snake_case , source_prompt=__snake_case , image=__snake_case , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__snake_case , output_type="np" , )
_a = output.images
assert np.abs(image - expected_image ).max() < 2E-2
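# A minimal end-to-end sketch distilled from the slow test above; the checkpoint,
# image URL, and prompts are the ones the test itself uses, while the output
# filename is a placeholder.
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))
scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)

image = pipe(
    prompt="A blue colored car",          # target description
    source_prompt="A black colored car",  # describes the input image
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
).images[0]
image.save("blue_car.png")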
| 705 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowercase ( x ): # picklable for multiprocessing
    return x.sum()
def _lowercase ( i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A :
    x : int
    y : str
class A ( a ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
        class Foo :
            my_attr = "bar"
        foo = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowercase ( iterable_length : int, num_proc : int, expected_num_proc : int ):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        _a = map_nested(lambda x : x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A ( a ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( lowerCamelCase__ : Any ):
_a = NestedDataStructure(lowerCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ):
_a = NestedDataStructure(lowerCamelCase__ ).flatten()
assert output == expected_output
def _lowercase ( ):
_a = A(x=1, y="foobar" )
_a = {"x": 1, "y": "foobar"}
assert asdict(lowerCamelCase__ ) == expected_output
_a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
_a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(lowerCamelCase__ ) == expected_output
with pytest.raises(lowerCamelCase__ ):
asdict([1, A(x=10, y="foo" )] )
def _lowercase ( lowerCamelCase__ : str ):
return text.split()
def _lowercase ( lowerCamelCase__ : List[Any] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowercase ( ):
with Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_a = []
for yield_time, content in iflatmap_unordered(
lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(lowerCamelCase__ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(lowerCamelCase__ ) == 4
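# A short illustration of map_nested, which the tests above exercise: the given
# function is applied to every leaf of an arbitrarily nested structure.
from datasets.utils.py_utils import map_nested

nested = {"a": [1, 2], "b": {"c": 3}}
print(map_nested(lambda x: x * 10, nested))  # {'a': [10, 20], 'b': {'c': 30}}
# With num_proc > 1 the top-level entries are split across worker processes,
# which is why the tests pass module-level functions (local lambdas cannot be
# pickled for multiprocessing).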
| 691 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : Tuple = DebertaTokenizer
__UpperCAmelCase : str = True
__UpperCAmelCase : Any = DebertaTokenizerFast
def __lowerCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_a = {"unk_token": "[UNK]"}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case_ ) )
def __lowerCAmelCase ( self , **snake_case_ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Dict:
_a = "lower newer"
_a = "lower newer"
return input_text, output_text
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.get_tokenizer()
_a = "lower newer"
_a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
_a = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = self.get_tokenizer()
_a = tokenizer("Hello" , "World" )
_a = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , snake_case_ )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.encode(
"sequence builders" , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
_a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
_a = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
_a = tokenizer_class.from_pretrained("microsoft/deberta-base" )
_a = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
_a = tokenizer(snake_case_ , padding=snake_case_ )
_a = [tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) for seq in encoding["input_ids"]]
# fmt: off
_a = {
"input_ids": [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_a = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , snake_case_ )
for expected, decoded in zip(snake_case_ , snake_case_ ):
self.assertEqual(snake_case_ , snake_case_ ) | 706 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__snake_case )
class RagConfig ( PretrainedConfig ):
    model_type = "rag"
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=3_0_0 , retrieval_vector_size=7_6_8 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ) -> Optional[Any]:
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder" )
        question_encoder_model_type = question_encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("generator" )
        decoder_model_type = decoder_config.pop("model_type" )
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , "forced_eos_token_id" , None )
    @classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config , generator_config , **kwargs ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict( self ) -> Optional[int]:
        output = copy.deepcopy(self.__dict__ )
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
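# A minimal sketch of composing a RagConfig from its two sub-configs via the
# classmethod defined above. The DPR/BART pairing mirrors the stock RAG
# checkpoints but is just one possible choice.
from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_config = DPRConfig()
generator_config = BartConfig()
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5, index_name="compressed"
)
print(rag_config.question_encoder.model_type, rag_config.generator.model_type)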
| 691 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
_a = dict(zip(__a , range(len(__a ) ) ) )
_a = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
_a = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_6_0_0_0,
"return_attention_mask": False,
"do_normalize": True,
}
_a = tempfile.mkdtemp()
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
# load decoder from hub
_a = "hf-internal-testing/ngram-beam-search-decoder"
def __lowerCAmelCase ( self , **snake_case_ ) -> int:
_a = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def __lowerCAmelCase ( self , **snake_case_ ) -> int:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def __lowerCAmelCase ( self , **snake_case_ ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def __lowerCAmelCase ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
_a = self.get_feature_extractor()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
_a = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_a = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__a , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_a = floats_list((3, 1_0_0_0) )
_a = feature_extractor(__a , return_tensors="np" )
_a = processor(__a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> int:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_a = "This is a test string"
_a = processor(text=__a )
_a = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self , snake_case_=(2, 1_0, 1_6) , snake_case_=7_7 ) -> List[Any]:
np.random.seed(__a )
return np.random.rand(*__a )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_a = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
_a = processor.decode(__a )
_a = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_a = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_a = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
_a = processor.batch_decode(__a , __a )
_a = list(__a )
with get_context("fork" ).Pool() as p:
_a = decoder.decode_beams_batch(__a , __a )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def __lowerCAmelCase ( self ) -> int:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_a = self._get_dummy_logits()
_a = 1_5
_a = -20.0
_a = -4.0
_a = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
_a = decoded_processor_out.text
_a = list(__a )
with get_context("fork" ).Pool() as pool:
_a = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
_a = [d[0][0] for d in decoded_decoder_out]
_a = [d[0][2] for d in decoded_decoder_out]
_a = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __a , atol=1E-3 ) )
def __lowerCAmelCase ( self ) -> Any:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_a = self._get_dummy_logits()
_a = 2.0
_a = 5.0
_a = -20.0
_a = True
_a = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
_a = decoded_processor_out.text
_a = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("fork" ).Pool() as pool:
_a = decoder.decode_beams_batch(
__a , __a , )
_a = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __a )
_a = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def __lowerCAmelCase ( self ) -> Dict:
_a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_a = processor.decoder.model_container[processor.decoder._model_key]
_a = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
_a = os.listdir(__a )
_a = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = snapshot_download("hf-internal-testing/processor_with_lm" )
_a = WavaVecaProcessorWithLM.from_pretrained(__a )
_a = processor.decoder.model_container[processor.decoder._model_key]
_a = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
_a = os.listdir(__a )
_a = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__a , __a )
def __lowerCAmelCase ( self ) -> List[str]:
_a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_a = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
_a = floats_list((3, 1_0_0_0) )
_a = processor_wavaveca(__a , return_tensors="np" )
_a = processor_auto(__a , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_a = self._get_dummy_logits()
_a = processor_wavaveca.batch_decode(__a )
_a = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def __lowerCAmelCase ( snake_case_ , snake_case_ ) -> int:
_a = [d[key] for d in offsets]
return retrieved_list
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_a = self._get_dummy_logits()[0]
_a = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_a = self._get_dummy_logits()
_a = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__a , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
_a = load_dataset("common_voice" , "en" , split="train" , streaming=__a )
_a = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
_a = iter(__a )
_a = next(__a )
_a = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
_a = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_a = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
_a = model(__a ).logits.cpu().numpy()
_a = processor.decode(logits[0] , output_word_offsets=__a )
_a = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_a = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
_a = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(__a , "word" ) ) , __a )
self.assertEqual(" ".join(self.get_from_offsets(__a , "word" ) ) , output.text )
# output times
_a = torch.tensor(self.get_from_offsets(__a , "start_time" ) )
_a = torch.tensor(self.get_from_offsets(__a , "end_time" ) )
# fmt: off
_a = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_a = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
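# A condensed sketch of LM-boosted CTC decoding, following the slow test above.
# The checkpoint is the same one the test downloads; the random tensor stands in
# for real 16 kHz audio, so the decoded text will be meaningless.
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

speech = torch.randn(16_000).numpy()  # stand-in for a real waveform
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits.cpu().numpy()
# Beam-search decoding through pyctcdecode instead of plain argmax:
print(processor.batch_decode(logits).text)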
| 707 |
'''simple docstring'''
class Graph :
    def __init__( self ) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ) -> None:
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ) -> None:
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ) -> None:
        """Makes all edge weights distinct (Boruvka assumes distinct weights)."""
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n" )
    def get_edges( self ):
        """Returns all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind :
        def __init__( self ) -> None:
            self.parent = {}
            self.rank = {}
        def __len__( self ) -> int:
            return len(self.parent )
        def make_set( self , item ):
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ):
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , itema , itemb ):
            root1 = self.find(itema )
            root2 = self.find(itemb )
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst( graph ):
        """Boruvka's algorithm: returns the minimum spanning tree of `graph`."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
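# A small usage sketch for the class above (not part of the original module):
# build a weighted graph, make the weights distinct, and print its MST.
g = Graph.build(
    vertices=[0, 1, 2, 3],
    edges=[(0, 1, 1), (0, 2, 2), (2, 3, 2), (0, 3, 3)],
)
g.distinct_weight()  # Boruvka's algorithm assumes distinct edge weights
mst = Graph.boruvka_mst(g)
print(mst)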
| 691 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Marks the function with a key code so it can be dispatched by the handler."""

    def decorator(func):
        handle = getattr(func, "handle_key", [] )
        handle += [key]
        setattr(func, "handle_key", handle )
        return func

    return decorator
def mark_multiple(*keys: str):
    """Marks the function with several key codes at once."""

    def decorator(func):
        handle = getattr(func, "handle_key", [] )
        handle += keys
        setattr(func, "handle_key", handle )
        return func

    return decorator
class KeyHandler ( type ):
    """Metaclass that registers the marked methods as key handlers on the class."""
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        """Reads one key press and returns the registered handler's result, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
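# A hypothetical demo of the plumbing above. The Menu class is made up for
# illustration, and the "up"/"down" keys are assumed to exist in KEYMAP.
@register
class Menu:
    @mark(KEYMAP["up"])
    def move_up(cls):
        print("moving up")

    @mark(KEYMAP["down"])
    def move_down(cls):
        print("moving down")

# Menu.handle_input(Menu) blocks for one key press and dispatches to whichever
# method was registered for that key (returning None for unregistered keys).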
| 708 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_a = bytes(lowerCamelCase__, "utf-8" )
with zstd.open(lowerCamelCase__, "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture
def _lowercase ( lowerCamelCase__ : int ):
with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f:
f.write(lowerCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 | 0 |