| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 53.2k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
"""simple docstring"""
def snake_case__ ( _lowerCamelCase ) ->int:
"""simple docstring"""
assert (
isinstance(snake_case__, snake_case__ ) and number_of_steps > 0
), F'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
__lowercase ,__lowercase : Optional[Any] = 1, 1
for _ in range(number_of_steps - 1 ):
__lowercase ,__lowercase : int = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
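
# Illustrative sketch (not from the original module): the loop above is the
# Fibonacci recurrence ways(n) = ways(n - 1) + ways(n - 2) with ways(1) = 1
# and ways(2) = 2, so the first few values can be spot-checked directly.
def _spot_check_climb_stairs() -> None:
    for n, expected in [(1, 1), (2, 2), (3, 3), (4, 5), (5, 8)]:
        assert climb_stairs(n) == expected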
"""simple docstring"""
def a__ ( ) -> int:
return 1
def a__ ( snake_case__ ) -> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def a__ ( snake_case__ ) -> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(snake_case__ )
def a__ ( snake_case__ ) -> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(snake_case__ )
def a__ ( snake_case__ ) -> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(snake_case__ )
def a__ ( snake_case__ ) -> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(snake_case__ )
def a__ ( snake_case__ ) -> int:
return 0 if x < 0 else one_pound(x - 1_00 ) + fifty_pence(snake_case__ )
def a__ ( snake_case__ ) -> int:
return 0 if x < 0 else two_pound(x - 2_00 ) + one_pound(snake_case__ )
def a__ ( snake_case__ = 2_00 ) -> int:
return two_pound(snake_case__ )
if __name__ == "__main__":
print(solution(int(input().strip())))
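
# Illustrative sketch (not from the original module): the chained functions
# count coin *combinations*, largest coin first, so solution(200) is the
# Project Euler 31 answer; small cases can be checked by hand, e.g.
# 5p = {5}, {2+2+1}, {2+1+1+1}, {1+1+1+1+1} -> 4 ways.
def _spot_check_solution() -> None:
    assert solution(5) == 4
    assert solution(200) == 73682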
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a nested feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # the second assertion checks the second shard (the original checked shard 00000 twice)
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # the loss is the mean cross-entropy per target token, so scaling by the
        # number of target tokens recovers the sequence log-likelihood
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed sequence length so every candidate can be stacked into one batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
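
# Illustrative usage sketch (not from the original module), assuming network
# access to the checkpoint below: each question's candidate list is padded to
# max_length so the result stacks into a (num_questions, num_candidates, seq_len) batch.
def _example_batch_encode_candidates() -> None:
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
    batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
    print(batch["input_ids"].shape)  # torch.Size([2, 2, 10])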
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract the start time, end time, and duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Collect the timing info of every job in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # the first page holds 100 jobs; fetch the remaining pages, if any
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
__magic_name__: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
__magic_name__: Dict = parser.parse_args()
__magic_name__: Tuple = get_job_time(args.workflow_run_id)
__magic_name__: Union[str, Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v['duration']}""")
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
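
# Illustrative sketch, assuming the public transformers API: a config object
# like the one above fully parameterizes a randomly initialized model.
def _example_build_model() -> None:
    from transformers import CamembertConfig, CamembertModel

    config = CamembertConfig(num_hidden_layers=6)  # override any default above
    model = CamembertModel(config)
    print(model.config.model_type)  # "camembert"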
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
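
# Illustrative sketch using the classes defined above: the ONNX config exposes
# the dynamic input axes and the numerical tolerance used to validate an export.
def _example_onnx_spec() -> None:
    config = Data2VecVisionConfig(image_size=384)
    onnx_config = Data2VecVisionOnnxConfig(config)
    print(dict(onnx_config.inputs))         # {"pixel_values": {0: "batch", ...}}
    print(onnx_config.atol_for_validation)  # 0.0001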
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
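
# Illustrative usage sketch, assuming network access to the hub checkpoint:
def _example_special_tokens() -> None:
    tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
    enc = tok("Hello world!", "How are you?")
    print(tok.decode(enc["input_ids"]))  # "[CLS] Hello world! [SEP] How are you? [SEP]"
    print(tok.get_special_tokens_mask(enc["input_ids"], already_has_special_tokens=True))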
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before
            SoftMax or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
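
# Illustrative sketch of how these criteria plug into generation, assuming the
# public transformers API (the gpt2 checkpoint is just a convenient example):
def _example_generate_with_criteria() -> None:
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer("The quick brown fox", return_tensors="pt")
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
    output = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
    print(tokenizer.decode(output[0]))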
"""Feature extractor class for SegFormer."""

import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batched, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates into [-1, 1] and scale by the field of view
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )

        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # required by the dataclass; keep the original ray-batch shape
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
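
# Illustrative sketch (not from the original module): ray origins and
# directions for a 64x64 pan of the 20 cameras created above.
def _example_camera_rays() -> None:
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays  # shape: (1, 20 * 64 * 64, 2, 3)
    origins, directions = rays[..., 0, :], rays[..., 1, :]
    print(origins.shape, directions.shape)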
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 200 ) ->int:
'''simple docstring'''
a : Dict = [1, 2, 5, 10, 20, 50, 100, 200]
a : Optional[Any] = [0] * (pence + 1)
a : List[Any] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_lowercase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
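
# Worked example for solution(5) with the relevant coins [1, 2, 5]:
#   after coin 1: number_of_ways = [1, 1, 1, 1, 1, 1]
#   after coin 2: number_of_ways = [1, 1, 2, 2, 3, 3]
#   after coin 5: number_of_ways = [1, 1, 2, 2, 3, 4]
# Iterating over coins in the outer loop counts combinations, not permutations,
# so solution(5) == 4, matching the recursive version earlier in this dump.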
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 434 | """simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 434 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)
    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
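# Added usage sketch (not in the original): the wrapper simply dispatches to
# whichever sub-tokenizer is current, e.g. (checkpoint name is an assumption):
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   inputs = tokenizer("who wrote hamlet?", return_tensors="pt")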
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
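    # Added usage sketch (not in the original; dataset id is a placeholder):
    #   python eval.py --model_id <model> --dataset mozilla-foundation/common_voice_8_0 \
    #       --config de --split test --log_outputs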
| 676 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__A : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1_024,
"moussaKam/barthez": 1_024,
"moussaKam/barthez-orangesum-title": 1_024,
}
__A : Dict = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer, built on a SentencePiece BPE model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 27 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 27 | 1 |
"""ERNIE-M model configuration"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    """Configuration class to store the configuration of an ERNIE-M model."""

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
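# Added usage sketch (not in the original): the defaults above describe the base
# configuration, e.g.
#   config = ErnieMConfig()
#   config.num_hidden_layers  # -> 12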
| 719 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 360 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 344 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_SCREAMING_SNAKE_CASE : Any = """sshleifer/bart-tiny-random"""
_SCREAMING_SNAKE_CASE : List[str] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 344 | 1 |
def longest_common_substring(text1: str, text2: str) -> str:
    """Finds the longest common substring of text1 and text2 via dynamic programming."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
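    # Added usage example (not in the original): the scan keeps the first maximal
    # match it finds.
    print(longest_common_substring("apple pie available", "apple pies"))  # -> "apple pie"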
| 421 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
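# Added illustration (not in the original): for a dataset with an Image column,
#   get_writer_batch_size(Features({"img": Image(), "label": Value("int64")}))
# falls back to config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS, keeping row
# groups small so random access to single rows stays cheap.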
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle. Caller is responsible for opening and closing the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 421 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """count each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__UpperCAmelCase : Tuple = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__UpperCAmelCase : Union[str, Any] = get_job_links(args.workflow_run_id, token=args.token)
__UpperCAmelCase : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__UpperCAmelCase : List[Any] = k.find(' / ')
__UpperCAmelCase : Any = k[index + len(' / ') :]
__UpperCAmelCase : List[Any] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__UpperCAmelCase : str = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__UpperCAmelCase : Optional[Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__UpperCAmelCase : List[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__UpperCAmelCase : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__UpperCAmelCase : Any = reduce_by_error(errors)
__UpperCAmelCase : str = reduce_by_model(errors)
__UpperCAmelCase : str = make_github_table(reduced_by_error)
__UpperCAmelCase : int = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
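    # Added illustration (not in the original): make_github_table renders rows like
    #   | no. | error | status |
    #   |-:|:-|:-|
    #   | 42 | OSError: Can't load config for ... | |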
| 471 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
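# Added illustration (not in the original): the loop counts the shifts needed to
# empty the number, so for non-negative ints it matches int.bit_length():
#   get_highest_set_bit_position(25)  # 25 = 0b11001 -> 5 == (25).bit_length()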
if __name__ == "__main__":
import doctest
doctest.testmod() | 397 | 0 |
'''simple docstring'''
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 706 |
"""Convert BERT checkpoint."""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
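    # Added usage sketch (not in the original; the script filename is an assumption):
    #   python convert_bert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./bert_model.ckpt \
    #       --bert_config_file ./bert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin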
| 532 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 36 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["FeatureExtractionMixin", "PreTrainedTokenizerBase"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 112 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44_100, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44_100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
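# Direct-usage sketch outside the test harness (my addition; shapes follow the
# assertions above, and the two-second random waveform is illustrative only).
def _tvlt_feature_extractor_demo():
    extractor = TvltFeatureExtractor()
    waveform = np.random.randn(44_100 * 2)  # two seconds of fake mono audio
    features = extractor(waveform, sampling_rate=44_100, return_tensors="np")
    return features.audio_values.shape  # (batch, num_audio_channels, time, feature_size)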
| 710 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4_003_660_346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2_734_971_755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1_044_355_234
        guidance_scale = 12

        # without safety guidance: the built-in checker blacks out the image
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
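# Sketch of using the safe pipeline directly (my addition): SafetyConfig and its
# STRONG preset are assumptions based on diffusers' stable_diffusion_safe module;
# the preset expands to the sld_* keyword arguments exercised in the tests above.
def _safe_pipeline_demo():
    from diffusers.pipelines.stable_diffusion_safe import SafetyConfig

    pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe = pipe.to("cuda")
    output = pipe("portrait photo of an astronaut", generator=torch.manual_seed(0), **SafetyConfig.STRONG)
    return output.images[0]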
| 587 | 0 |
def one_pence():
    return 1


def two_pence(x):
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x):
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x):
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x):
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x):
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x):
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x):
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x=200):
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
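# A bottom-up alternative (my sketch, not in the original row): it gives the same
# count in O(len(coins) * amount) instead of the recursive cascade above.
def solution_dp(amount: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * amount  # ways[a] = number of ways to form amount a
    for coin in coins:
        for a in range(coin, amount + 1):
            ways[a] += ways[a - coin]
    return ways[amount]  # solution_dp(200) == 73682, matching solution(200)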
| 62 |
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 474 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 476 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 476 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

# The proxy-rewritten domains in the original dump are restored to huggingface.co.
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs) -> None:
        # Parameter names restored: the obfuscated signature reused one name for
        # every argument, which would not even parse.
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
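# Usage sketch (my addition): a tiny config keeps dummy-input generation cheap;
# the tokenizer checkpoint is one of the entries from the archive map above.
def _codegen_onnx_dummy_inputs_demo():
    from transformers import AutoTokenizer

    config = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
    onnx_config = CodeGenOnnxConfig(config, use_past=False)
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=1, seq_length=8, framework=TensorType.PYTORCH)
    return list(dummy)  # ['input_ids', 'attention_mask']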
| 279 |
import argparse
import hashlib  # used only to verify our implementation against the stdlib
import struct
class SHA1Hash:
    """Class to implement the SHA-1 algorithm for a bytestring."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the data so its bit length is a multiple of 512."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into 80 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
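    # Quick direct-usage check (my addition): the class should agree with hashlib.
    sample = b"hello world"
    assert SHA1Hash(sample).final_hash() == hashlib.sha1(sample).hexdigest()  # noqa: S324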
| 279 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) is defined as 0 here
    return -plogp.sum(dim=-1)
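# For intuition (my addition, not part of the original script): a uniform
# distribution maximizes this entropy; for 4 outcomes it equals log(4) ~= 1.3863.
def _entropy_sanity_check():
    probs = torch.full((1, 4), 0.25)
    assert torch.allclose(entropy(probs), torch.log(torch.tensor([4.0])))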
def print_2d_tensor(tensor):
    """Log a 2D tensor row by row (layers x heads)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    """Compute head attention entropy and head importance scores (Michel et al.,
    http://arxiv.org/abs/1905.10650) by accumulating head-mask gradients."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        ((input_ids),) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask heads (set them to zero) until the score drops below
    ``masking_threshold`` times the original score, following Michel et al.
    (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove their weights) according to ``head_mask`` and
    compare score and timing against the masked-only model."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
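
# Example invocation (illustrative; `tokens.txt` stands for a hypothetical file
# of whitespace-separated token ids loadable by np.loadtxt):
#
#     python run_prune_gpt.py \
#         --model_name_or_path gpt2 \
#         --data_dir ./tokens.txt \
#         --output_dir ./pruned \
#         --try_masking --masking_threshold 0.9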
| 393 |
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Prepare and measure a fully entangled (GHZ) state on ``qubits`` qubits
    with Qiskit's Aer simulator, returning the measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
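    # For a GHZ state the measured bits are perfectly correlated, so the counts
    # concentrate (up to sampling noise) on the all-zeros and all-ones strings,
    # e.g. roughly {'000': ~500, '111': ~500} for 1000 shots on 3 qubits.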
| 393 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
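

# A quick sanity check of the dataloaders outside of training (a sketch; it spins
# up a throwaway CPU Accelerator purely to drive `get_dataloaders`):
#
#     accelerator = Accelerator(cpu=True)
#     train_dl, eval_dl = get_dataloaders(accelerator, batch_size=8)
#     print(next(iter(train_dl))["input_ids"].shape)  # (8, padded_length)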
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location of where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
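
# Typical launches (the script file name is illustrative):
#
#     accelerate launch tracking.py --with_tracking     # log to trackers from the environment
#     python tracking.py --cpu --mixed_precision no     # single-process CPU run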
| 472 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product obtainable from a contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
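

if __name__ == "__main__":
    # Quick usage check: the negative value flips the running minimum into the new
    # maximum, so the best subarray of the first input is [2, 3] with product 6.
    print(max_product_subarray([2, 3, -2, 4]))  # 6
    print(max_product_subarray([-2, 0, -1]))  # 0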
| 699 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
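
# Sketch of how the layer is typically configured (the concrete numbers below
# are illustrative, matching the classic Transformer-XL WT103 setup): `cutoffs`
# split the vocabulary into a frequent "head" plus tail clusters whose embedding
# dimension shrinks by `div_val` per cluster.
#
#     softmax = TFAdaptiveSoftmaxMask(
#         vocab_size=267735, d_embed=1024, d_proj=1024,
#         cutoffs=[20000, 40000, 200000], div_val=4,
#     )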
| 714 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the smallest non-negative x with x % n1 == r1 and x % n2 == r2,
    for coprime n1 and n2, via the extended Euclidean algorithm."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as `chinese_remainder_theorem`, but built on modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
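
    # Worked example: the smallest x with x % 5 == 1 and x % 7 == 3 is 31, and
    # both solvers agree:
    #
    #     >>> chinese_remainder_theorem(5, 1, 7, 3)
    #     31
    #     >>> chinese_remainder_theorem2(5, 1, 7, 3)
    #     31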
| 642 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no pretrained list to test
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a fixed vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so the test is unnecessary
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
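
# The expected ids in the integration tests above follow from the tokenizer's
# byte-level scheme: PerceiverTokenizer reserves ids 0-5 for its special tokens,
# so a UTF-8 byte b maps to id b + 6 (e.g. 'U' = 85 -> 91 and '.' = 46 -> 52, as
# asserted in test_multibytes_char).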
| 0 |
""" Testing suite for the PyTorch GPTNeoXJapanese model. """
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
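
# The generation test above is marked @slow; in the transformers test setup such
# tests only run when explicitly enabled, e.g.:
#
#     RUN_SLOW=1 python -m pytest tests/models/gpt_neox_japanese -k generation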
| 597 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
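

# The in-place update above is the standard LoRA merge W <- W0 + alpha * (up @ down):
# each targeted weight receives the product of its paired `lora_up`/`lora_down`
# low-rank matrices, scaled by the merging ratio `alpha`.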
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
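
    # Example invocation (illustrative paths):
    #
    #     python convert_lora_safetensor_to_diffusers.py \
    #         --base_model_path runwayml/stable-diffusion-v1-5 \
    #         --checkpoint_path ./my_lora.safetensors \
    #         --dump_path ./merged-model --alpha 0.75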
| 715 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
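

# A minimal usage sketch (assumes a features dict with a string column and a
# ClassLabel column; the column names here are illustrative):
#
#     from datasets import ClassLabel, Features, Value
#
#     features = Features({"review": Value("string"), "sentiment": ClassLabel(names=["neg", "pos"])})
#     template = TextClassification(text_column="review", label_column="sentiment")
#     template = template.align_with_features(features)
#     print(template.column_mapping)  # {'review': 'text', 'sentiment': 'labels'}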
| 25 | 0 |
"""Convert Wav2Vec2 Conformer checkpoint."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
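
# Hypothetical invocation sketch (file names and paths are illustrative, not
# taken from the source):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec2_conformer_rope_large.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#       --dict_path /path/to/dict.ltr.txt
#
# Without --not_finetuned the checkpoint is treated as fine-tuned, so a CTC
# head plus tokenizer/processor files are written next to the converted weights.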
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__( self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
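
# Minimal usage sketch (the local path is chosen for illustration; the
# defaults above correspond to the large M-CTC-T architecture):
#
#   config = MCTCTConfig()                      # large M-CTC-T defaults
#   config.save_pretrained("./mctct-config")    # writes config.json
#   reloaded = MCTCTConfig.from_pretrained("./mctct-config")
#
# Passing a conv_kernel whose length differs from num_conv_layers raises the
# ValueError at the end of __init__.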
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the least row length for which the fill count (placements of
    blocks of at least `min_block_length`, separated by at least one empty
    cell) first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1000000:
            break
    return n
if __name__ == "__main__":
print(F'''{solution() = }''')
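
# Worked example, assuming this implements the Project Euler 115 recurrence:
#   solution(3) returns 30, the least row length where the fill count for
#   blocks of length >= 3 first exceeds one million.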
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # timm weights are already initialized; nothing to do here
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
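
# Hypothetical usage sketch (the backbone name is an assumption; any timm
# model id with feature_info works):
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(2, 3))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps   # tuple of stage outputs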
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to feed the processor with."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.get_tokenizer()
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Any = self.get_image_processor()
UpperCAmelCase : List[Any] = ChineseCLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase : Dict = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase )
UpperCAmelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase )
self.assertIsInstance(processor_fast.tokenizer , lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase )
self.assertIsInstance(processor_fast.image_processor , lowercase )
    def test_save_load_pretrained_additional_features(self):
'''simple docstring'''
UpperCAmelCase : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase : Dict = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
UpperCAmelCase : List[Any] = self.get_image_processor(do_normalize=lowercase )
UpperCAmelCase : Optional[int] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=lowercase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
    def test_image_processor(self):
'''simple docstring'''
UpperCAmelCase : Tuple = self.get_image_processor()
UpperCAmelCase : List[str] = self.get_tokenizer()
UpperCAmelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
UpperCAmelCase : Dict = self.prepare_image_inputs()
UpperCAmelCase : Tuple = image_processor(lowercase , return_tensors="np" )
UpperCAmelCase : Dict = processor(images=lowercase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self):
'''simple docstring'''
UpperCAmelCase : List[str] = self.get_image_processor()
UpperCAmelCase : Optional[Any] = self.get_tokenizer()
UpperCAmelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
UpperCAmelCase : Tuple = "Alexandra,T-shirt的价格是15便士。"
UpperCAmelCase : int = processor(text=lowercase )
UpperCAmelCase : Dict = tokenizer(lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.get_image_processor()
UpperCAmelCase : Tuple = self.get_tokenizer()
UpperCAmelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
UpperCAmelCase : Union[str, Any] = "Alexandra,T-shirt的价格是15便士。"
UpperCAmelCase : Optional[Any] = self.prepare_image_inputs()
UpperCAmelCase : str = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
    def test_tokenizer_decode(self):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.get_image_processor()
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : str = ChineseCLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
UpperCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase : List[str] = processor.batch_decode(lowercase )
UpperCAmelCase : List[str] = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
    def test_model_input_names(self):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.get_image_processor()
UpperCAmelCase : Optional[Any] = self.get_tokenizer()
UpperCAmelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
UpperCAmelCase : Optional[Any] = "Alexandra,T-shirt的价格是15便士。"
UpperCAmelCase : Any = self.prepare_image_inputs()
UpperCAmelCase : Union[str, Any] = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
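
# Hypothetical invocation sketch (the test-file path follows the usual
# transformers repo layout and is an assumption):
#
#   python -m pytest tests/models/chinese_clip/test_processor_chinese_clip.py -v
#
# Only Pillow is required (@require_vision); no pretrained weights are fetched,
# since every fixture is built in setUp from a tiny in-memory vocab.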
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """
    Probability density of the normal distribution N(mu, sigma^2) at x.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
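
# Sanity-check sketch (values rounded; they follow from the density above):
#   gaussian(0)        -> 0.3989422804014327   # peak of the standard normal
#   gaussian(1)        -> 0.24197072451914337
#   gaussian(2, mu=2)  -> 0.3989422804014327   # same peak, shifted mean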
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")
        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
# convert pytorch tensor to numpy
a__ : List[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
a__ : Union[str, Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
a__ : Optional[Any] = flax_model.params['''params''']
else:
a__ : Tuple = flax_model.params
a__ : List[Any] = flatten_dict(lowerCAmelCase__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
a__ : Union[str, Any] = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(lowerCAmelCase__ )
a__ : Optional[Any] = {}
a__ : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
a__ : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
a__ : int = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
a__ : Any = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
a__ : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
a__ , a__ : int = rename_key_and_reshape_tensor(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# add model prefix if necessary
a__ : int = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
a__ : Optional[int] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
a__ : Any = jnp.asarray(lowerCAmelCase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
continue
# also add unexpected weight so that warning is thrown
a__ : int = jnp.asarray(lowerCAmelCase__ )
else:
# also add unexpected weight so that warning is thrown
a__ : Any = jnp.asarray(lowerCAmelCase__ )
return unflatten_dict(lowerCAmelCase__ )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
import torch
# Load the index
a__ : Dict = {}
for shard_file in shard_filenames:
# load using msgpack utils
a__ : Union[str, Any] = torch.load(lowerCAmelCase__ )
a__ : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
a__ : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
a__ : Optional[int] = flax_model.params['''params''']
a__ : List[Any] = flatten_dict(lowerCAmelCase__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
a__ : Tuple = flax_model.params
a__ : str = flatten_dict(lowerCAmelCase__ )
a__ : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
a__ : Tuple = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
a__ : Dict = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
a__ : Any = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
a__ : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
a__ , a__ : List[Any] = rename_key_and_reshape_tensor(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# add model prefix if necessary
a__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
a__ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
a__ : int = jnp.asarray(lowerCAmelCase__ )
continue
if "var" in flax_key[-1]:
a__ : List[Any] = jnp.asarray(lowerCAmelCase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
continue
# also add unexpected weight so that warning is thrown
a__ : Optional[Any] = jnp.asarray(lowerCAmelCase__ )
else:
# also add unexpected weight so that warning is thrown
a__ : str = jnp.asarray(lowerCAmelCase__ )
return unflatten_dict(lowerCAmelCase__ )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
a__ : Any = os.path.abspath(lowerCAmelCase__ )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
a__ : Union[str, Any] = getattr(lowerCAmelCase__ , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(lowerCAmelCase__ , '''rb''' ) as state_f:
try:
a__ : List[Any] = from_bytes(lowerCAmelCase__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(lowerCAmelCase__ , lowerCAmelCase__ )
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()
a__ : int = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
a__ : str = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
a__ : Tuple = []
a__ : str = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
a__ : List[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
a__ : Any = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
a__ : Union[str, Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
a__ : List[str] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(lowerCAmelCase__ ) not in pt_model_dict:
# conv layer
a__ : Tuple = flax_key_tuple[:-1] + ('''weight''',)
a__ : int = jnp.transpose(lowerCAmelCase__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCAmelCase__ ) not in pt_model_dict:
# linear layer
a__ : str = flax_key_tuple[:-1] + ('''weight''',)
a__ : List[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a__ : int = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
a__ : List[str] = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
a__ : List[Any] = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
a__ : Union[str, Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
a__ : int = '''.'''.join(lowerCAmelCase__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
a__ : str = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
a__ : List[str] = key.split('''.''' )
a__ : Optional[int] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
a__ : Union[str, Any] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
a__ : Tuple = key_components[-2] + '''_v'''
if name is not None:
a__ : List[str] = key_components[:-3] + [name]
a__ : Tuple = '''.'''.join(lowerCAmelCase__ )
a__ : Any = key
if flax_key in special_pt_names:
a__ : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
a__ : str = np.asarray(lowerCAmelCase__ ) if not isinstance(lowerCAmelCase__ , np.ndarray ) else flax_tensor
a__ : List[Any] = torch.from_numpy(lowerCAmelCase__ )
# remove from missing keys
missing_keys.remove(lowerCAmelCase__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowerCAmelCase__ )
pt_model.load_state_dict(lowerCAmelCase__ )
# re-transform missing_keys to list
a__ : Union[str, Any] = list(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(lowerCAmelCase__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
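
# Minimal round-trip sketch (the model choice is an assumption; any
# architecture with both PT and Flax implementations works the same way):
#
#   from transformers import BertModel, FlaxBertModel
#   BertModel.from_pretrained("bert-base-uncased").save_pretrained("./bert-pt")
#   flax_model = FlaxBertModel.from_pretrained("./bert-pt", from_pt=True)  # PT -> Flax via the converters above
#   pt_model = load_flax_weights_in_pytorch_model(BertModel.from_pretrained("./bert-pt"), flax_model.params)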
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizer(PreTrainedTokenizer):
    """
    Construct an NLLB tokenizer (backed by SentencePiece).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
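
# Hypothetical translation-prep sketch (sentence and language codes chosen for
# illustration):
#
#   tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # non-legacy mode: input_ids = [eng_Latn code] + subword ids + [</s>]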
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pickle_tokenizer(self):
        pass  # TODO add if relevant

    def test_rust_and_python_full_tokenizers(self):
        pass  # TODO add if relevant

    def test_sequence_builders(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = SudachiTokenizer(normalize_text=A_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = SudachiTokenizer(trim_whitespace=A_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
__lowercase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(A_ )
__lowercase = """こんにちは、世界。\nこんばんは、世界。"""
__lowercase = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowercase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(A_ , """wb""" ) as handle:
pickle.dump(A_ , A_ )
with open(A_ , """rb""" ) as handle:
__lowercase = pickle.load(A_ )
__lowercase = tokenizer_new.tokenize(A_ )
self.assertListEqual(A_ , A_ )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
__lowercase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = JumanppTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
__lowercase = JumanppTokenizer(normalize_text=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = JumanppTokenizer(trim_whitespace=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
__lowercase = {}
for i, token in enumerate(A_ ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=A_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
__lowercase = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
__lowercase = tokenizer.subword_tokenizer
__lowercase = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(A_ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
__lowercase = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(A_ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
__lowercase = tokenizer.encode("""ありがとう。""" , add_special_tokens=A_ )
__lowercase = tokenizer.encode("""どういたしまして。""" , add_special_tokens=A_ )
__lowercase = tokenizer.build_inputs_with_special_tokens(A_ )
__lowercase = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
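
# Illustrative usage of the tokenizer exercised above (a sketch, not part of the
# original suite; the checkpoint name is the one the tests already download):
#
#     tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
#     tokens = tokenizer.tokenize("こんにちは、世界。")   # MeCab word split, then WordPiece subwords
#     ids = tokenizer.convert_tokens_to_ids(tokens)
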
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(_a, unittest.TestCase):
    # `_a` is the TokenizerTesterMixin alias imported earlier in this module.
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def lowerCAmelCase_ ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
__lowercase = cva.getAffineTransform(UpperCamelCase__ , UpperCamelCase__ )
return cva.warpAffine(UpperCamelCase__ , UpperCamelCase__ , (rows, cols) )
if __name__ == "__main__":
# read original image
UpperCAmelCase__ =cva.imread(
str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
UpperCAmelCase__ =cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
UpperCAmelCase__ , UpperCAmelCase__ =gray_img.shape
# set different points to rotate image
UpperCAmelCase__ =np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
UpperCAmelCase__ =np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
UpperCAmelCase__ =np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
UpperCAmelCase__ =np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
UpperCAmelCase__ =[
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
UpperCAmelCase__ =plt.figure(1)
UpperCAmelCase__ =["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 616 | 1 |
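# Note on get_rotation above: cv2.getAffineTransform solves for the 2x3 matrix M that
# maps the three source points onto the three destination points (three point pairs
# determine an affine map exactly); cv2.warpAffine then applies
#     [x', y']^T = M @ [x, y, 1]^T
# to every pixel, which is what produces the "rotated" images in the demo.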
"""Power iteration: find the dominant eigenvalue and eigenvector of a matrix."""
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
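# Quick sanity check for power_iteration above (a sketch; any symmetric matrix works):
#
#     A = np.array([[2.0, 1.0], [1.0, 2.0]])
#     value, vec = power_iteration(A, np.array([1.0, 0.0]))
#     # value -> 3.0 (dominant eigenvalue), vec -> ~[0.707, 0.707]
#
# Convergence is geometric in |lambda_2 / lambda_1|, so well-separated eigenvalues
# converge in a handful of iterations.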
import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase : Optional[int] = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # The name `_UpperCAmelCase` from the literal on the line above is kept so the
        # very long expected-encoding dict does not have to be retyped.
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
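
# Typical use of the tokenizer exercised above (a sketch; downloads the checkpoint
# already referenced by the integration test):
#
#     tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#     ids = tokenizer("I was born in 92000, and this is falsé.")["input_ids"]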
"""Interpolation search over an ascending sorted collection of numbers."""


def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    # demo data (the original guarded this behind a debug flag; kept unconditional
    # here so the script runs as-is)
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
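# Usage sketch for interpolation_search above. Unlike binary search, the probe index
# is placed proportionally to where `item` should fall between the endpoint values,
# giving O(log log n) average probes on uniformly distributed keys (O(n) worst case):
#
#     interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 45)               # -> 3
#     interpolation_search_by_recursion([10, 30, 40, 45, 50, 66, 77, 93], 45, 0, 7)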
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask (the zeroed region was lost in the source; the top-left quadrant
        # is a reasonable reconstruction)
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        # the zeroed mask region was lost in the source; the top band is a reconstruction
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax checkpoint weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
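
# Usage sketch for the converter above (hypothetical model and path; any module with
# a matching Flax checkpoint works the same way):
#
#     pt_model = MyPyTorchModel(config)                       # randomly initialised
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")
#
# Flax Dense kernels are stored (in_features, out_features) while torch nn.Linear
# stores (out_features, in_features), which is why every "kernel" leaf is transposed
# in the loop above.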
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name reconstructed; the original name was lost
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
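# Minimal end-to-end use of the pipeline under test (a sketch; the model id is the
# one the slow test above already downloads):
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#     image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]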
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed-size, doubly linked ring of nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None

        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
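# Usage sketch for CircularQueueLinkedList above; capacity is fixed at construction
# and the ring of nodes is reused instead of allocating a node per enqueue:
#
#     queue = CircularQueueLinkedList(initial_capacity=3)
#     queue.enqueue("a")
#     queue.enqueue("b")
#     queue.first()    # -> "a"
#     queue.dequeue()  # -> "a"  (the slot is cleared, not deallocated)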
def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be abbreviated to string `b` by uppercasing some
    of its lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
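# How the table in abbr() works, traced on abbr("daBcd", "ABC"):
# dp[i][j] means "the first i chars of a can be turned into the first j chars of b".
#   'd' is lowercase, so it may be deleted           -> dp[1][0]
#   'a'.upper() == 'A'                               -> dp[2][1]
#   'B' == 'B'                                       -> dp[3][2]
#   'c'.upper() == 'C'                               -> dp[4][3]
#   trailing 'd' is lowercase, delete it             -> dp[5][3] == dp[n][m] -> True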
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
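
    # The projection above factors the output matrix: logits = ((hidden @ proj) @ weight^T) + bias,
    # so each cluster i only needs a d_proj x d_emb_i projection plus a small
    # (cluster_size x d_emb_i) embedding table instead of one huge n_token x d_proj
    # matrix -- the "projected" part of the adaptive softmax.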
    def forward(self, hidden, labels=None, keep_order=False):
        """
        hidden: (bsz, seq_len, d_proj) activations; labels: (bsz, seq_len) target ids.
        Returns negative log-likelihoods when `labels` is given, else log-probabilities.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
def lowercase__ ( self : Dict , _lowercase : Optional[int] ):
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE__ : List[Any] = self._compute_logit(_lowercase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_lowercase , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE__ : List[str] = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE__ : List[Any] = self.out_layers[i].weight
SCREAMING_SNAKE_CASE__ : int = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_lowercase )
biases.append(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE__ : List[str] = self._compute_logit(_lowercase , _lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
SCREAMING_SNAKE_CASE__ : Tuple = nn.functional.log_softmax(_lowercase , dim=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(_lowercase ) - 1 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
SCREAMING_SNAKE_CASE__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE__ : int = self._compute_logit(_lowercase , _lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : str = nn.functional.log_softmax(_lowercase , dim=1 )
SCREAMING_SNAKE_CASE__ : str = head_logprob[:, -i] + tail_logprob_i
SCREAMING_SNAKE_CASE__ : str = logprob_i
return out
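# --- Hedged usage sketch (added for illustration; not part of the original file).
# It assumes the class above is transformers' ProjectedAdaptiveLogSoftmax from the
# Transformer-XL utilities, with constructor signature
# (n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False).
# The cutoffs split a 1000-token vocabulary into a 200-token head plus two tail clusters:
#
#     import torch
#     crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[200, 600])
#     hidden = torch.randn(4, 8, 64)            # (batch, seq, d_proj)
#     labels = torch.randint(0, 1000, (4, 8))   # next-token targets
#     nll = crit(hidden, labels)                # per-position negative log-likelihoods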
| 250 |
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
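    # Illustrative check (added): 13 = 2**2 + 3**2 is the only way to write 13
    # as a sum of unique squares, so this prints 1.
    print(solve(13, 2))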
| 250 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'spiece.model'}
lowerCAmelCase__ = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
lowerCAmelCase__ = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
lowerCAmelCase__ = '▁'
class _lowerCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_: Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_: str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_SCREAMING_SNAKE_CASE : Tuple = (
AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
else mask_token
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = do_lower_case
_SCREAMING_SNAKE_CASE : int = remove_space
_SCREAMING_SNAKE_CASE : int = keep_accents
_SCREAMING_SNAKE_CASE : Dict = vocab_file
_SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def A ( self ) -> int:
return len(self.sp_model )
def A ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Dict = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
_SCREAMING_SNAKE_CASE : int = self.__dict__.copy()
_SCREAMING_SNAKE_CASE : List[Any] = None
return state
def __setstate__( self , lowerCAmelCase_ ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_SCREAMING_SNAKE_CASE : Dict = {}
_SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self , lowerCAmelCase_ ) -> Any:
if self.remove_space:
_SCREAMING_SNAKE_CASE : Dict = ' '.join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE : List[str] = inputs
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE : Tuple = unicodedata.normalize('NFKD' , __lowerCAmelCase )
_SCREAMING_SNAKE_CASE : str = ''.join([c for c in outputs if not unicodedata.combining(__lowerCAmelCase )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE : Any = outputs.lower()
return outputs
def A ( self , lowerCAmelCase_ ) -> List[str]:
_SCREAMING_SNAKE_CASE : str = self.preprocess_text(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Tuple = []
for piece in pieces:
if len(__lowerCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE : Any = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCAmelCase )
else:
new_pieces.append(__lowerCAmelCase )
return new_pieces
def A ( self , lowerCAmelCase_ ) -> Dict:
return self.sp_model.PieceToId(__lowerCAmelCase )
def A ( self , lowerCAmelCase_ ) -> List[Any]:
return self.sp_model.IdToPiece(__lowerCAmelCase )
def A ( self , lowerCAmelCase_ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Union[str, Any] = ''
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Dict = []
else:
current_sub_tokens.append(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : List[Any] = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_SCREAMING_SNAKE_CASE : int = os.path.join(
__lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , 'wb' ) as fi:
_SCREAMING_SNAKE_CASE : Dict = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
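# Hedged usage sketch (added for illustration; the file name below is a placeholder,
# any trained SentencePiece model would do):
#
#     tokenizer = AlbertTokenizer(vocab_file="spiece.model")
#     ids = tokenizer("The quick brown fox.")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))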
| 621 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
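# Deterministic example (added for illustration): with colors (0, 1, 2),
# dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) returns [0, 0, 1, 1, 2, 2]
# in a single pass with O(1) extra space.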
| 2 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled "use_memorry_efficient_attention" key is accepted for
        # backward compatibility with older configuration files.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 711 |
from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )


class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
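# Illustrative note (added): a single case from this file can be selected with
# pytest's -k filter; the path below assumes the usual transformers repository layout:
#
#     pytest tests/models/deberta_v2/test_modeling_tf_deberta_v2.py -k "test_for_masked_lm"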
| 552 | 0 |
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), input_bpe_tokens)
| 525 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
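# Worked example (added for illustration): 44 -> 32 -> 13 -> 10 -> 1, so the chain
# starting at 44 ends at 1, while 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 ->
# 58 -> 89 loops at 89. solution() counts how many starting numbers below ten
# million end up at 89.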
| 525 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    # Wraps several ControlNetModel instances so they can be conditioned and
    # saved/loaded together.
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
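# Hedged usage sketch (added for illustration; checkpoint paths are placeholders):
#
#     nets = [
#         ControlNetModel.from_pretrained("path/to/controlnet-canny"),
#         ControlNetModel.from_pretrained("path/to/controlnet-pose"),
#     ]
#     multi = MultiControlNetModel(nets)
#     multi.save_pretrained("multi-controlnet")  # writes multi-controlnet, multi-controlnet_1, ...
#     multi = MultiControlNetModel.from_pretrained("multi-controlnet")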
| 719 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
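# Hedged usage sketch (added for illustration): the auto classes resolve a checkpoint's
# config type to the matching Flax architecture through the mappings above, e.g.
#
#     from transformers import FlaxAutoModelForMaskedLM
#     model = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-uncased")
#     # -> a FlaxBertForMaskedLM, selected via FLAX_MODEL_FOR_MASKED_LM_MAPPING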
| 206 | 0 |
"""Newton's forward interpolation on equally spaced points."""
from __future__ import annotations

import math


# for calculating u value
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
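# Worked example (added for illustration): for equally spaced x = 0, 1, 2, 3 with
# y = x**2, interpolating at value 1.5 gives u = 1.5. The first differences are
# 1, 3, 5 and the second differences are constant (2), so the Newton forward
# formula yields 0 + 1.5 * 1 + (1.5 * 0.5 / 2) * 2 = 2.25, which is exact.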
| 683 |
"""Find the day of the week for a given date with the Doomsday algorithm."""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT a leap year if it is not divisible by 4, or if it is a
    # century year not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
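    # Illustrative spot checks (added): 2020-10-24 fell on a Saturday and
    # 2017-10-24 on a Tuesday.
    print(get_week_day(2020, 10, 24))  # Saturday
    print(get_week_day(2017, 10, 24))  # Tuesday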
| 683 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
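# Hedged usage sketch (added for illustration): visual_embedding_dim is the size of
# the visual region features fed in alongside the text embeddings, e.g.
#
#     config = VisualBertConfig(visual_embedding_dim=2048)  # raw Faster R-CNN features
#     print(config.hidden_size)  # 768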
| 414 |
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly `successes` successes in `trials` independent
    Bernoulli trials with success probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
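    # Expected value for the demo above (added): C(4, 2) * 0.75**2 * 0.25**2
    # = 6 * 0.5625 * 0.0625 = 0.2109375.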
| 414 | 1 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Depth-first walk over the state space tree: each level picks one unused
    # element and recursion stops once the permutation is complete.
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 26 |
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1, mimicking a logical AND gate."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 561 | 0 |
from __future__ import annotations

sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 718 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_abit
lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed.")
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
lowerCAmelCase_ : Optional[int] = load_in_abit
lowerCAmelCase_ : List[str] = load_in_abit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
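# --- Illustrative usage sketch (added by the editor; not part of the original file) ---
# How the top-level entry point above is typically driven (it is named
# `load_and_quantize_model` in the accelerate-style API this code mirrors). The
# config class `BnbQuantizationConfig` and its import path are assumptions;
# adapt them to your environment.
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized_model = load_and_quantize_model(
#         empty_model,                      # built under init_empty_weights()
#         bnb_quantization_config=bnb_config,
#         weights_location="path/to/checkpoint_folder",
#         device_map="auto",
#     )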
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
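# Example (editor's sketch): a custom `device_map` is just a dict from module
# names to devices. With the 4-bit flag set (collapsed to `load_in_abit` above),
# any "cpu"/"disk" placement of a quantized module triggers the ValueError;
# skipped modules are exempt. Module names here are hypothetical.
#
#     device_map = {
#         "transformer.h": 0,        # quantized blocks stay on GPU 0
#         "lm_head": "cpu",          # fine only if "lm_head" is in skip_modules
#     }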
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
if isinstance(snake_case__ , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_abit:  # 8-bit path (``bnb.nn.Linear8bitLt`` in the real bitsandbytes API)
                    lowerCAmelCase_ : Tuple = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case__ , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:  # 4-bit path (``bnb.nn.Linear4bit``); the 8/4-bit flags are distinct config attributes collapsed by the obfuscation
                    lowerCAmelCase_ : Dict = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
lowerCAmelCase_ : List[str] = module.weight.data
if module.bias is not None:
lowerCAmelCase_ : Any = module.bias.data
bnb_module.requires_grad_(snake_case__)
setattr(snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
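# Trace (editor's note; module names are illustrative): for a toy model with a
# child chain `decoder.layers.0.fc1` of type nn.Linear and
# modules_to_not_convert=["lm_head"], the recursion builds
# current_key_name_str = "decoder.layers.0.fc1", finds no skip-key match, and
# swaps the layer for a bnb linear, copying weight/bias and freezing gradients.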
def UpperCamelCase ( snake_case__):
# Create a copy of the model
with init_empty_weights():
        lowerCAmelCase_ : List[Any] = deepcopy(snake_case__)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
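# Example (editor's note, hedged): for a typical causal-LM wrapper whose output
# head is tied to the input embeddings, the last named child is the head, so
# this function returns something like ["lm_head"] -- the module kept in full
# precision. For bare base models (BertModel-style) it returns [].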
def UpperCamelCase ( snake_case__):
for m in model.modules():
if isinstance(snake_case__ , bnb.nn.Linearabit):
return True
return False
def UpperCamelCase ( snake_case__):
return next(parameter.parameters()).device
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__)
lowerCAmelCase_ : str = param_name
lowerCAmelCase_ : Tuple = model
if "." in tensor_name:
lowerCAmelCase_ : Dict = tensor_name.split(".")
for split in splits[:-1]:
lowerCAmelCase_ : Any = getattr(snake_case__ , snake_case__)
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''')
lowerCAmelCase_ : Union[str, Any] = new_module
lowerCAmelCase_ : Any = splits[-1]
# offload weights
lowerCAmelCase_ : List[Any] = False
offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__)
if hasattr(module._parameters[tensor_name] , "SCB"):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__ , )
else:
offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__)
offload_weight(snake_case__ , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__)
set_module_tensor_to_device(snake_case__ , snake_case__ , "meta" , dtype=snake_case__ , value=torch.empty(*param.size()))
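# Example (editor's note): the getattr walk above resolves a dotted parameter
# path one attribute at a time, e.g. for a hypothetical
# param_name = "transformer.h.0.attn.weight" the splits are
# ["transformer", "h", "0", "attn", "weight"]; `module` ends up at ...attn
# with tensor_name = "weight" before the weight (and its SCB stats, if any)
# is offloaded.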
| 683 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ (UpperCamelCase_ ):
lowercase_ : Any = "MCTCTFeatureExtractor"
lowercase_ : Tuple = "AutoTokenizer"
def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
super().__init__(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = self.feature_extractor
lowerCAmelCase__ = False
def __call__( self : Any , *__lowerCamelCase : Tuple , **__lowerCamelCase : Any ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase , **__lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowerCAmelCase__ = kwargs.pop('''raw_speech''' )
else:
lowerCAmelCase__ = kwargs.pop('''audio''' , __lowerCamelCase )
lowerCAmelCase__ = kwargs.pop('''sampling_rate''' , __lowerCamelCase )
lowerCAmelCase__ = kwargs.pop('''text''' , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
lowerCAmelCase__ = args[0]
lowerCAmelCase__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowerCAmelCase__ = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
if text is not None:
lowerCAmelCase__ = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase__ = encodings['''input_ids''']
return inputs
def A__ ( self : Any , *__lowerCamelCase : str , **__lowerCamelCase : List[str] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def A__ ( self : Optional[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : int ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__lowerCamelCase , **__lowerCamelCase )
lowerCAmelCase__ = kwargs.pop('''input_features''' , __lowerCamelCase )
lowerCAmelCase__ = kwargs.pop('''labels''' , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
lowerCAmelCase__ = args[0]
lowerCAmelCase__ = args[1:]
if input_features is not None:
lowerCAmelCase__ = self.feature_extractor.pad(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
if labels is not None:
lowerCAmelCase__ = self.tokenizer.pad(__lowerCamelCase , **__lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowerCAmelCase__ = labels['''input_ids''']
return input_features
def A__ ( self : Dict , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : int ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@contextmanager
def A__ ( self : Any ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowerCAmelCase__ = True
lowerCAmelCase__ = self.tokenizer
yield
lowerCAmelCase__ = self.feature_extractor
lowerCAmelCase__ = False
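# --- Illustrative usage sketch (added by the editor) ---
# The processor forwards audio to the feature extractor and text to the
# tokenizer; when both are given, the tokenized ids are attached as "labels".
# The class is named MCTCTProcessor in the original API; checkpoint name and
# sampling rate below are placeholders.
#
#     processor = MCTCTProcessor.from_pretrained("some/mctct-checkpoint")
#     inputs = processor(audio=waveform, sampling_rate=16000, text="transcript")
#     # inputs["input_features"], inputs["labels"]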
| 615 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Any:
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCAmelCase , )
assert hasattr(self , '''env''' )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple:
# configuration for running training on smdistributed Model Parallel
SCREAMING_SNAKE_CASE__: Optional[Any]= {
'''enabled''': True,
'''processes_per_host''': 8,
}
SCREAMING_SNAKE_CASE__: Dict= {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
SCREAMING_SNAKE_CASE__: Optional[Any]= {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
SCREAMING_SNAKE_CASE__: Dict= '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase , py_version='''py36''' , )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(1,)] )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
# create estimator
SCREAMING_SNAKE_CASE__: List[str]= self.create_estimator(lowerCAmelCase )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE__: Any= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE__: List[Any]= (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCAmelCase )
| 64 | 0 |
# Function to print upper half of diamond (pyramid)
def lowerCAmelCase_ ( n : int ):  # parameter name restored (the obfuscation collapsed it with the body's `n`)
    for i in range(0 , n ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(""" """ , end="""""" )
for _ in range(0 , i + 1 ): # printing stars
print("""* """ , end="""""" )
print()
def lowerCAmelCase_ ( n : int ):  # parameter name restored; the inner loop counts down from `i`, not `n`
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
print("""* """ , end="""""" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(""" """ , end="""""" )
def lowerCAmelCase_ ( n : int ):  # parameter name restored
    if n <= 0:
        print(""" ... .... nothing printing :(""" )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
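# Example (editor's note): pretty_print(3) prints, schematically,
#
#       *
#      * *
#     * * *
#     * * *
#      * *
#       *
#
# (each star is emitted as "* ", so rows carry trailing spaces, and the lower
# half's leading spaces are printed at the end of the previous iteration).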
if __name__ == "__main__":
print(R"| /\ | |- | |- |--| |\ /| |-")
print(R"|/ \| |- |_ |_ |__| | \/ | |_")
SCREAMING_SNAKE_CASE : str = 1
while K:
SCREAMING_SNAKE_CASE : Optional[int] = int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
SCREAMING_SNAKE_CASE : str = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 709 |
from math import sqrt
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' must been an int and positive"
UpperCamelCase_ : Union[str, Any] = True
# 0 and 1 are none primes.
if number <= 1:
UpperCamelCase_ : Tuple = False
for divisor in range(2 , int(round(sqrt(_SCREAMING_SNAKE_CASE ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
UpperCamelCase_ : Union[str, Any] = False
break
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'status' must been from type bool"
return status
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
UpperCamelCase_ : Dict = list(range(2 , n + 1 ) )
UpperCamelCase_ : Union[str, Any] = [] # this list will be returns.
# actual sieve of erathostenes
    # loop bounds restored (the obfuscation collapsed `begin_list` into the parameter placeholder)
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
UpperCamelCase_ : Optional[int] = 0
# filters actual prime numbers.
UpperCamelCase_ : Union[str, Any] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
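# Example (editor's note): sieving 2..20 zeroes the composites in begin_list
# and the final filter leaves
#     [2, 3, 5, 7, 11, 13, 17, 19]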
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
UpperCamelCase_ : List[Any] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_SCREAMING_SNAKE_CASE ):
ans.append(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and number >= 0, "'number' must been an int and >= 0"
UpperCamelCase_ : Union[str, Any] = [] # this list will be returns of the function.
# potential prime number factors.
UpperCamelCase_ : Optional[int] = 2
UpperCamelCase_ : Dict = number
if number == 0 or number == 1:
ans.append(_SCREAMING_SNAKE_CASE )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(_SCREAMING_SNAKE_CASE ):
while quotient != 1:
if is_prime(_SCREAMING_SNAKE_CASE ) and (quotient % factor == 0):
ans.append(_SCREAMING_SNAKE_CASE )
quotient /= factor
else:
factor += 1
else:
ans.append(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
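# Example (editor's note): for 360 the loop peels factors smallest-first:
#     360 -> [2, 2, 2, 3, 3, 5]
# note that `quotient /= factor` makes quotient a float, which still compares
# equal to 1 at the end of the loop.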
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
    assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
        number >= 0
    ), "'number' must been an int and >= 0"
UpperCamelCase_ : Tuple = 0
# prime factorization of 'number'
UpperCamelCase_ : int = prime_factorization(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Any = max(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
    assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
        number >= 0
    ), "'number' must been an int and >= 0"
UpperCamelCase_ : Any = 0
# prime factorization of 'number'
UpperCamelCase_ : Dict = prime_factorization(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Union[str, Any] = min(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : str ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , _SCREAMING_SNAKE_CASE ), "compare must been from type bool"
return number % 2 == 0
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , _SCREAMING_SNAKE_CASE ), "compare must been from type bool"
return number % 2 != 0
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] ):
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(_SCREAMING_SNAKE_CASE )
), "'number' must been an int, even and > 2"
UpperCamelCase_ : Tuple = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
UpperCamelCase_ : int = get_prime_numbers(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : str = len(_SCREAMING_SNAKE_CASE )
# run variable for while-loops.
UpperCamelCase_ : Optional[int] = 0
UpperCamelCase_ : Union[str, Any] = None
# exit variable. for break up the loops
UpperCamelCase_ : Any = True
while i < len_pn and loop:
UpperCamelCase_ : Optional[Any] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
UpperCamelCase_ : Union[str, Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (len(_SCREAMING_SNAKE_CASE ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
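# Example (editor's note): goldbach(28) scans prime pairs in ascending order
# and returns [5, 23], the first pair of primes summing to 28.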
def lowerCAmelCase_ ( number_a : int , number_b : int ):
    # The obfuscation collapsed both parameters and the loop variables into one
    # name; they are restored here so Euclid's algorithm is actually executable.
    assert (
        isinstance(number_a , int )
        and isinstance(number_b , int )
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number_b != 0:
        rest = number_a % number_b
        number_a = number_b
        number_b = rest
    # precondition
    assert isinstance(number_a , int ) and (
        number_a >= 0
    ), "'number' must be of type int and positive"
    return number_a
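# Example (editor's note): Euclid's algorithm on (24, 36):
#     24 % 36 = 24 -> (36, 24); 36 % 24 = 12 -> (24, 12); 24 % 12 = 0 -> (12, 0)
# so the gcd is 12.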
def lowerCAmelCase_ ( number_a : int , number_b : int ):
    # NOTE: the obfuscation collapsed several distinct names (number1/number2,
    # prime_fac_1/prime_fac_2, count_1/count_2); they are restored below so the
    # lcm computation is actually executable.
    assert (
        isinstance(number_a , int )
        and isinstance(number_b , int )
        and (number_a >= 1)
        and (number_b >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number_a > 1 and number_b > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number_a )
        prime_fac_b = prime_factorization(number_b )
    elif number_a == 1 or number_b == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number_a , number_b )
    count_a = 0
    count_b = 0
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n )
                count_b = prime_fac_b.count(n )
                for _ in range(max(count_a , count_b ) ):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n )
                for _ in range(count_a ):
                    ans *= n
            done.append(n )
    # iterates through prime_fac_2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n )
            for _ in range(count_b ):
                ans *= n
            done.append(n )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
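# Example (editor's note): kg_v(24, 36) factors 24 = [2, 2, 2, 3] and
# 36 = [2, 2, 3, 3], multiplies the maximal prime powers (2**3 * 3**2) and
# returns the lcm 72.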
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : List[Any] ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'number' must been a positive int"
UpperCamelCase_ : str = 0
UpperCamelCase_ : Tuple = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_SCREAMING_SNAKE_CASE ):
ans += 1
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and is_prime(
_SCREAMING_SNAKE_CASE ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
assert (
is_prime(_SCREAMING_SNAKE_CASE ) and is_prime(_SCREAMING_SNAKE_CASE ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
UpperCamelCase_ : Optional[int] = p_number_a + 1 # jump to the next number
UpperCamelCase_ : List[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_SCREAMING_SNAKE_CASE ):
number += 1
while number < p_number_a:
ans.append(_SCREAMING_SNAKE_CASE )
number += 1
# fetch the next prime number.
while not is_prime(_SCREAMING_SNAKE_CASE ):
number += 1
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and ans[0] != p_number_a
and ans[len(_SCREAMING_SNAKE_CASE ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : List[str] ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 1), "'n' must been int and >= 1"
UpperCamelCase_ : Any = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_SCREAMING_SNAKE_CASE )
# precondition
    assert ans[0] == 1 and ans[len(_SCREAMING_SNAKE_CASE ) - 1] == n, "Error in function get_divisors(...)"
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Dict ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
number > 1
), "'number' must been an int and >= 1"
UpperCamelCase_ : Dict = get_divisors(_SCREAMING_SNAKE_CASE )
# precondition
    assert (
        isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        and (divisors[0] == 1)
        and (divisors[len(_SCREAMING_SNAKE_CASE ) - 1] == number)
    ), "Error in help-function get_divisors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
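# Example (editor's note): 28 has divisors [1, 2, 4, 7, 14, 28]; the proper
# divisors sum to 28, so 28 is perfect. For 12 the proper divisors sum to 16,
# so the function returns False.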
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ):
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
UpperCamelCase_ : List[Any] = gcd(abs(_SCREAMING_SNAKE_CASE ) , abs(_SCREAMING_SNAKE_CASE ) )
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been a int and >= 0"
UpperCamelCase_ : Dict = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been an int and >= 0"
UpperCamelCase_ : Tuple = 0
UpperCamelCase_ : Optional[int] = 1
UpperCamelCase_ : List[str] = 1 # this will be return
for _ in range(n - 1 ):
UpperCamelCase_ : List[Any] = ans
ans += fiba
UpperCamelCase_ : Any = tmp
return ans
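# Example (editor's note): with this implementation the loop runs n - 1 times
# starting from ans = 1, so fib(1)..fib(5) give 1, 2, 3, 5, 8 -- the classic
# sequence offset by one position (F(n + 1) with F(1) = F(2) = 1).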
| 138 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowercase_ = ["gpt2"]
lowercase_ = "gpt2"
if is_tf_available():
class __A ( tf.Module ):
'''simple docstring'''
def __init__(self , A ) -> List[Any]:
"""simple docstring"""
super().__init__()
_a = tokenizer
_a = AutoConfig.from_pretrained(A )
_a = TFGPTaLMHeadModel.from_config(A )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def a__ (self , A ) -> Optional[Any]:
"""simple docstring"""
_a = self.tokenizer(A )
_a = tokenized['''input_ids'''].to_tensor()
_a = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_a = self.model(input_ids=A , attention_mask=A )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Any:
"""simple docstring"""
super().setUp()
_a = [GPTaTokenizer.from_pretrained(A ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_a = [TFGPTaTokenizer.from_pretrained(A ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_a = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
_a = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def a__ (self ) -> Any:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_a = tokenizer([test_inputs] , return_tensors='''tf''' )
_a = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_a = python_outputs[key].numpy()
_a = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(A , tf.intaa ) == tf_outputs_values ) )
@slow
def a__ (self ) -> int:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_a = tf.function(A )
for test_inputs in self.test_sentences:
_a = tf.constant(A )
_a = compiled_tokenizer(A )
_a = tf_tokenizer(A )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def a__ (self ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_a = ModelToSave(tokenizer=A )
_a = tf.convert_to_tensor([self.test_sentences[0]] )
_a = model.serving(A ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_a = Path(A ) / '''saved.model'''
tf.saved_model.save(A , A , signatures={'''serving_default''': model.serving} )
_a = tf.saved_model.load(A )
_a = loaded_model.signatures['''serving_default'''](A )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def a__ (self ) -> List[Any]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_a = tf.convert_to_tensor([self.test_sentences[0]] )
_a = tf_tokenizer(A ) # Build model with some sample inputs
_a = tf_tokenizer.get_config()
_a = TFGPTaTokenizer.from_config(A )
_a = model_from_config(A )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def a__ (self ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_a = 123_123
for max_length in [3, 5, 1_024]:
_a = tf.convert_to_tensor([self.test_sentences[0]] )
_a = tf_tokenizer(A , max_length=A )
_a = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 11 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = IFInpaintingSuperResolutionPipeline
__lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {'latents'}
def a__ (self ) -> List[Any]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def a__ (self , A , A=0 ) -> List[Any]:
"""simple docstring"""
if str(A ).startswith('''mps''' ):
_a = torch.manual_seed(A )
else:
_a = torch.Generator(device=A ).manual_seed(A )
_a = floats_tensor((1, 3, 16, 16) , rng=random.Random(A ) ).to(A )
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def a__ (self ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def a__ (self ) -> str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a__ (self ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def a__ (self ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
def __lowercase ( number_of_bytes : int , partitions : int ) -> list[str]:
    # parameter names restored (the obfuscation collapsed both into `a`)
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''' )
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not be greater than number_of_bytes!''' )
    __snake_case : List[str] =number_of_bytes // partitions  # bytes_per_partition
    __snake_case : str =[]  # allocation_list
    for i in range(partitions ):
__snake_case : Optional[Any] =i * bytes_per_partition + 1
__snake_case : Any =(
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
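# Example (editor's note): allocation_num(100, 4) yields
#     ['1-25', '26-50', '51-75', '76-100']
# -- equal 25-byte chunks, with the last partition absorbing any remainder.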
if __name__ == "__main__":
import doctest
doctest.testmod()
| 497 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
UpperCamelCase_ : Optional[Any] = 6_37_81_37.0
UpperCamelCase_ : Optional[Any] = 6_35_67_52.31_42_45
UpperCamelCase_ : Union[str, Any] = 6378137
def __lowercase ( lat_1 : float , lon_1 : float , lat_2 : float , lon_2 : float ) -> float:
    # NOTE: the obfuscation collapsed several distinct names into `a`; they are
    # restored below (lat/lon pairs, b_lat_1/b_lat_2, p_value/q_value, sigma)
    # so Lambert's formula is actually computable.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat_1 = atan((1 - flattening) * tan(radians(lat_1 ) ) )
    b_lat_2 = atan((1 - flattening) * tan(radians(lat_2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat_1 , lon_1 , lat_2 , lon_2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat_1 + b_lat_2) / 2
    q_value = (b_lat_1 - b_lat_2) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
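# Example call shape (editor's note; the coordinates are illustrative and the
# result is in metres on the WGS-84 ellipsoid):
#     lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)
# Note the formula divides by sin(sigma / 2) ** 2, so identical input points
# (sigma == 0) raise ZeroDivisionError rather than returning 0.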
if __name__ == "__main__":
import doctest
doctest.testmod()
| 497 | 1 |
'''simple docstring'''
import os
from math import log10  # restored: `logaa` is not a real math function; the original imports log10
def __magic_name__ ( base_exp : str = "base_exp.txt" ) -> int:
    '''simple docstring'''
    largest = 0
    result = 0
    # Names restored where the obfuscation collapsed them: the data file is
    # resolved relative to this module (__file__) and each line parses as a
    # `base,exponent` pair of ints; a**x is compared via x * log10(a).
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , base_exp ) ) ):
        a , x = list(map(int , line.split(""",""" ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
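# Example (editor's note): the log trick compares exponentials without
# evaluating them: for 2**11 vs 3**7,
#     11 * log10(2) ~= 3.3113   <   7 * log10(3) ~= 3.3399
# so 3**7 (= 2187) beats 2**11 (= 2048).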
if __name__ == "__main__":
print(solution())
| 109 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : List[str] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = '''lxmert'''
UpperCAmelCase__ : Any = {}
def __init__( self :Dict ,__snake_case :Optional[Any]=3_05_22 ,__snake_case :int=7_68 ,__snake_case :int=12 ,__snake_case :Any=95_00 ,__snake_case :Union[str, Any]=16_00 ,__snake_case :str=4_00 ,__snake_case :Optional[Any]=30_72 ,__snake_case :List[str]="gelu" ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Dict=5_12 ,__snake_case :str=2 ,__snake_case :List[str]=0.02 ,__snake_case :Optional[int]=1E-12 ,__snake_case :Any=9 ,__snake_case :List[str]=5 ,__snake_case :Optional[Any]=5 ,__snake_case :str=20_48 ,__snake_case :Optional[Any]=4 ,__snake_case :str=6.67 ,__snake_case :Union[str, Any]=True ,__snake_case :str=True ,__snake_case :int=True ,__snake_case :List[str]=True ,__snake_case :List[Any]=True ,__snake_case :str=True ,__snake_case :List[str]=True ,**__snake_case :Optional[Any] ,) -> str:
a__ = vocab_size
a__ = hidden_size
a__ = num_attention_heads
a__ = hidden_act
a__ = intermediate_size
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = initializer_range
a__ = layer_norm_eps
a__ = num_qa_labels
a__ = num_object_labels
a__ = num_attr_labels
a__ = l_layers
a__ = x_layers
a__ = r_layers
a__ = visual_feat_dim
a__ = visual_pos_dim
a__ = visual_loss_normalizer
a__ = task_matched
a__ = task_mask_lm
a__ = task_obj_predict
a__ = task_qa
a__ = visual_obj_loss
a__ = visual_attr_loss
a__ = visual_feat_loss
a__ = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**__snake_case )
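# Example (editor's note): instantiating with the defaults above gives the
# three-encoder LXMERT layout -- 9 language layers, 5 cross-modality layers,
# 5 visual ("relational") layers -- exposed via the num_hidden_layers dict
# built in __init__:
#     {'vision': 5, 'cross_encoder': 5, 'language': 9}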
| 335 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowercase : Optional[Any] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowercase : int = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_lowercase : str = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : int ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ):
lowercase_ : Optional[int] = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowercase_ : str = [[refs[i] for refs in references] for i in range(lowercase_ )]
lowercase_ : int = TER(
normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , )
lowercase_ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 721 |
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( mat_a : np.ndarray , mat_b : np.ndarray , mat_c : np.ndarray , pseudo_inv : np.ndarray | None = None , ) -> np.ndarray:
    # parameter names restored (the obfuscation collapsed all four into one placeholder)
    lowercase_ : List[Any] = np.shape(mat_a )
    lowercase_ : Dict = np.shape(mat_b )
    lowercase_ : int = np.shape(mat_c )
if shape_a[0] != shape_b[0]:
lowercase_ : Optional[int] = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(UpperCAmelCase__ )
if shape_b[1] != shape_c[1]:
lowercase_ : Optional[Any] = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(UpperCAmelCase__ )
lowercase_ : Any = pseudo_inv
if a_inv is None:
try:
            lowercase_ : List[str] = np.linalg.inv(mat_a )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
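# Worked check (editor's note): with A = [[2, 0], [0, 2]], B = [[1], [1]],
# C = [[3]], the complement is S = C - B.T @ inv(A) @ B = [[2.0]], and the
# determinant identity exercised by the test below holds:
#     det([[A, B], [B.T, C]]) = det(A) * det(S) = 4 * 2 = 8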
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_UpperCamelCase = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_UpperCamelCase = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def UpperCamelCase_( ) -> Union[str, Any]:
UpperCAmelCase__ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
UpperCAmelCase__ = bs[:]
UpperCAmelCase__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowerCamelCase )
cs.append(2**8 + n )
n += 1
UpperCAmelCase__ = [chr(__lowerCamelCase ) for n in cs]
return dict(zip(__lowerCamelCase , __lowerCamelCase ) )
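# Example (editor's note): the map is a bijection from all 256 byte values to
# printable unicode characters; printable bytes map to themselves, while e.g.
# byte 32 (space) maps to chr(256 + 32) == 'Ġ' -- which is why GPT-2 vocab
# entries show 'Ġ' for a leading space.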
def UpperCamelCase_( snake_case__: int ) -> List[Any]:
UpperCAmelCase__ = set()
UpperCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ = char
return pairs
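# Example (editor's note): get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")},
# the adjacent symbol pairs the BPE loop ranks for merging.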
class lowercase ( lowercase__ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
    def __init__(self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    def vocab_size(self ):
        """simple docstring"""
        return len(self.encoder )
    def get_vocab(self ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
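    # Worked example with hypothetical ranks: given bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1},
    # bpe("low") rewrites ("l", "o", "w") -> ("lo", "w") -> ("low",) and returns "low".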
    def _tokenize(self , text ):
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id(self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self , index ):
        """simple docstring"""
        return self.decoder.get(index )
    def convert_tokens_to_string(self , tokens ):
        """simple docstring"""
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self , text , is_split_into_words=False , **kwargs ):
        """simple docstring"""
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
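# Hedged usage sketch (local vocab/merges paths are placeholders; the example ids
# assume the upstream facebook/bart-base vocabulary):
# tokenizer = BartTokenizer("vocab.json", "merges.txt")
# tokenizer("Hello world")["input_ids"]  # e.g. [0, 31414, 232, 2]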
| 146 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
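# Illustrative note (not part of the module): with this _LazyModule pattern the
# package import stays cheap, and heavy backends load only on attribute access:
#   from transformers import XGLMConfig         # always available
#   from transformers import XGLMForCausalLM    # resolved lazily, requires torch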
| 63 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    '''simple docstring'''
with open(__A ) as metadata_file:
snake_case : Union[str, Any] = json.load(__A )
snake_case : Any = LukeConfig(use_entity_aware_attention=__A , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
snake_case : Any = torch.load(__A , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
snake_case : Dict = load_original_entity_vocab(__A )
# add an entry for [MASK2]
snake_case : Optional[int] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
snake_case : List[str] = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("""<ent>""" , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken("""<ent2>""" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_1, entity_token_2]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__A )
with open(os.path.join(__A , """tokenizer_config.json""" ) , """r""" ) as f:
snake_case : int = json.load(__A )
snake_case : Any = """MLukeTokenizer"""
with open(os.path.join(__A , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(__A , __A )
with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(__A , __A )
snake_case : Any = MLukeTokenizer.from_pretrained(__A )
# Initialize the embeddings of the special tokens
snake_case : List[str] = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
snake_case : Union[str, Any] = state_dict["""embeddings.word_embeddings.weight"""]
snake_case : str = word_emb[ent_init_index].unsqueeze(0 )
snake_case : Dict = word_emb[enta_init_index].unsqueeze(0 )
snake_case : str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
snake_case : Any = state_dict[bias_name]
snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
snake_case : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
snake_case : Union[str, Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self."""
snake_case : List[str] = state_dict[prefix + matrix_name]
snake_case : Optional[Any] = state_dict[prefix + matrix_name]
snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case : List[str] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
snake_case : Dict = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
snake_case : Optional[int] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case : Dict = state_dict["""entity_predictions.bias"""]
snake_case : Dict = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
snake_case : Dict = torch.cat([entity_prediction_bias, entity_mask_bias] )
snake_case : Optional[int] = LukeForMaskedLM(config=__A ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
snake_case : Dict = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
snake_case : List[Any] = state_dict[key]
else:
snake_case : str = state_dict[key]
    missing_keys , unexpected_keys = model.load_state_dict(__A , strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
    if set(missing_keys ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
snake_case : str = MLukeTokenizer.from_pretrained(__A , task="""entity_classification""" )
snake_case : Optional[int] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
snake_case : Dict = (0, 9)
snake_case : Optional[Any] = tokenizer(__A , entity_spans=[span] , return_tensors="""pt""" )
snake_case : Any = model(**__A )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : Any = torch.Size((1, 33, 768) )
snake_case : List[str] = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : List[str] = torch.Size((1, 1, 768) )
snake_case : Optional[Any] = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
snake_case : List[Any] = MLukeTokenizer.from_pretrained(__A )
snake_case : List[str] = """Tokyo is the capital of <mask>."""
snake_case : Tuple = (24, 30)
snake_case : Optional[Any] = tokenizer(__A , entity_spans=[span] , return_tensors="""pt""" )
snake_case : Any = model(**__A )
snake_case : str = encoding["""input_ids"""][0].tolist()
snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
snake_case : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__A )
snake_case : int = outputs.entity_logits[0][0].argmax().item()
snake_case : List[str] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__A ) )
model.save_pretrained(__A )
def load_original_entity_vocab(entity_vocab_path ):
    '''simple docstring'''
    SPECIAL_TOKENS = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["""id"""]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"""{language}:{entity_name}"""] = entity_id
    return new_mapping
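# The entity vocab file is JSON-lines; each entry looks like this (illustrative):
#   {"id": 4, "entities": [["Japan", "en"], ["[MASK]", "en"]]}
# Non-special entities map to keys of the form "en:Japan" -> 4 above.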
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowercase : List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
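# Example invocation (script name and all paths are placeholders):
# python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./mluke/pytorch_model.bin \
#     --metadata_path ./mluke/metadata.json \
#     --entity_vocab_path ./mluke/entity_vocab.jsonl \
#     --pytorch_dump_folder_path ./mluke-base \
#     --model_size base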
| 315 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : str = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowercase : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict , old , new ):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict , is_panoptic=False ):
    '''simple docstring'''
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
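# Shape note for the split above (illustrative): with hidden size 256, each
# in_proj_weight is (768, 256); rows 0-255 / 256-511 / 512-767 become the
# q / k / v projection weights respectively.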
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name , pytorch_dump_folder_path ):
    '''simple docstring'''
snake_case : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case : Optional[Any] = """resnet101"""
if "dc5" in model_name:
snake_case : Optional[Any] = True
snake_case : Optional[int] = """panoptic""" in model_name
if is_panoptic:
snake_case : str = 250
else:
snake_case : Dict = 91
snake_case : Dict = """huggingface/label-files"""
snake_case : List[str] = """coco-detection-id2label.json"""
snake_case : Optional[int] = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
snake_case : Tuple = {int(__A ): v for k, v in idalabel.items()}
snake_case : List[str] = idalabel
snake_case : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
snake_case : Dict = """coco_panoptic""" if is_panoptic else """coco_detection"""
snake_case : Any = ConditionalDetrImageProcessor(format=__A )
# prepare image
snake_case : List[str] = prepare_img()
snake_case : List[str] = image_processor(images=__A , return_tensors="""pt""" )
snake_case : Dict = encoding["""pixel_values"""]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
snake_case : Tuple = torch.hub.load("""DeppMeng/ConditionalDETR""" , __A , pretrained=__A ).eval()
snake_case : str = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case : Any = """conditional_detr.""" + src
rename_key(__A , __A , __A )
snake_case : Optional[int] = rename_backbone_keys(__A )
# query, key and value matrices need special treatment
read_in_q_k_v(__A , is_panoptic=__A )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case : Tuple = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
snake_case : str = state_dict.pop(__A )
snake_case : Tuple = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case : Dict = state_dict.pop(__A )
snake_case : Optional[int] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
snake_case : Optional[int] = state_dict.pop(__A )
snake_case : Optional[Any] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
snake_case : List[Any] = state_dict.pop(__A )
snake_case : Optional[int] = val
# finally, create HuggingFace model and load state dict
snake_case : str = ConditionalDetrForSegmentation(__A ) if is_panoptic else ConditionalDetrForObjectDetection(__A )
model.load_state_dict(__A )
model.eval()
model.push_to_hub(repo_id=__A , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
snake_case : Tuple = conditional_detr(__A )
snake_case : Tuple = model(__A )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
image_processor.save_pretrained(__A )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowercase : Optional[int] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
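# Example invocation (script name and dump path are placeholders):
# python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#     --model_name conditional_detr_resnet50 \
#     --pytorch_dump_folder_path ./conditional-detr-resnet-50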
| 315 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
lowercase : str = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowercase : Optional[int] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str ):
    with open(path , "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCAmelCase = field(default=UpperCamelCase_ , metadata={'help': 'A folder containing the training data.'} )
lowerCAmelCase = field(default=UpperCamelCase_ , metadata={'help': 'A folder containing the validation data.'} )
lowerCAmelCase = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __A ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCAmelCase = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(UpperCamelCase_ )} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowerCAmelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase = field(default=UpperCamelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn(examples ):
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
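# e.g. a batch of eight 224x224 RGB crops collates to pixel_values of shape
# (8, 3, 224, 224) and labels of shape (8,).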
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , A__ , A__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(A__ )
transformers.utils.logging.set_verbosity(A__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCAmelCase = {}
if data_args.train_dir is not None:
lowerCAmelCase = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
lowerCAmelCase = os.path.join(data_args.validation_dir , "**" )
lowerCAmelCase = load_dataset(
"imagefolder" , data_files=A__ , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCAmelCase = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , A__ ) and data_args.train_val_split > 0.0:
lowerCAmelCase = dataset["train"].train_test_split(data_args.train_val_split )
lowerCAmelCase = split["train"]
lowerCAmelCase = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCAmelCase = dataset["train"].features["labels"].names
lowerCAmelCase , lowerCAmelCase = {}, {}
for i, label in enumerate(A__ ):
lowerCAmelCase = str(A__ )
lowerCAmelCase = label
# Load the accuracy metric from the datasets package
lowerCAmelCase = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(A__ ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel=A__ , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowerCAmelCase = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowerCAmelCase = image_processor.size["shortest_edge"]
else:
lowerCAmelCase = (image_processor.size["height"], image_processor.size["width"])
lowerCAmelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowerCAmelCase = Compose(
[
RandomResizedCrop(A__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowerCAmelCase = Compose(
[
Resize(A__ ),
CenterCrop(A__ ),
ToTensor(),
normalize,
] )
def train_transforms(A__ ):
lowerCAmelCase = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(A__ ):
lowerCAmelCase = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowerCAmelCase = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(A__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowerCAmelCase = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(A__ )
# Initalize our trainer
lowerCAmelCase = Trainer(
model=A__ , args=A__ , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=A__ , tokenizer=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase = last_checkpoint
lowerCAmelCase = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCAmelCase = trainer.evaluate()
trainer.log_metrics("eval" , A__ )
trainer.save_metrics("eval" , A__ )
# Write model card and (optionally) push to hub
lowerCAmelCase = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**A__ )
else:
trainer.create_model_card(**A__ )
if __name__ == "__main__":
main()
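# Example run (dataset name and hyperparameters are illustrative):
# python run_image_classification.py \
#     --dataset_name beans \
#     --output_dir ./vit-beans \
#     --do_train --do_eval \
#     --per_device_train_batch_size 8 \
#     --learning_rate 2e-5 \
#     --num_train_epochs 5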
| 649 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self ):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution(self ):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov(self ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords(self ) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="trunc" ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays(self ):
        batch_size , *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays(self , coords: torch.Tensor ) -> torch.Tensor:
        batch_size , *shape , n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image(self , width: int , height: int ) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )  # shape passed through so the dataclass stays constructible (assumed; absent in the garbled original)
def create_pan_cameras(size: int ) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
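# Sketch of expected usage (shapes follow from the definitions above):
# cameras = create_pan_cameras(64)
# cameras.camera_rays.shape  # (1, 20 * 64 * 64, 2, 3): per-pixel (origin, direction)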
| 463 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/xglm-564M''': 2048,
}
class XGLMTokenizer(PreTrainedTokenizer ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ['''input_ids''', '''attention_mask''']
    def __init__(self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model )
        madeup_words_to_ids = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
    @property
    def vocab_size(self ) -> int:
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab(self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize(self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
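# Hedged usage sketch (checkpoint name per PRETRAINED_VOCAB_FILES_MAP above):
# tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# tok.build_inputs_with_special_tokens([5, 6])  # -> [2, 5, 6]: </s> (id 2) is prepended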
| 463 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text : str ) -> None:
    '''simple docstring'''
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(""" """ + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f'{round(-1 * my_fir_sum ):.1f}' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f'{round(-1 * my_sec_sum ):.1f}' )
    # print the difference between them
    print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def analyze_text( text : str ) -> tuple[dict, dict]:
    '''simple docstring'''
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main( ) -> None:
    '''simple docstring'''
    import doctest
    doctest.testmod()
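# Worked example (assumed input, not part of the original snippet): for text = "abb ",
# analyze_text returns single_char_strings = {' ': 1, 'a': 1, 'b': 2} and
# two_char_strings = {' a': 1, 'ab': 1, 'bb': 1, 'b ': 1}; the one-character entropy is then
# -sum(p * log2(p)) over p in (1/4, 1/4, 2/4) = 1.5 bits.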
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 600 |
def __UpperCamelCase ( a : str , b : str ) -> bool:
    '''simple docstring'''
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
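# Example (assumed semantics, matching the "Abbreviation"-style DP above): uppercase some
# lowercase letters of a and delete the remaining lowercase ones so that a becomes b.
# For a = "daBcd" and b = "ABC": drop 'd', capitalize 'a' and 'c', keep 'B' -> True.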
if __name__ == "__main__":
import doctest
doctest.testmod()
| 600 | 1 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A (unittest.TestCase ):
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 2_50
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
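# Minimal usage sketch (assumed standard transformers generate() API, mirroring the
# criteria exercised above):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=0.5)])
#   model.generate(input_ids, stopping_criteria=criteria)  # stops once any criterion fires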
| 705 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A (unittest.TestCase ):
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 2_50
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
| 10 | 0 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
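# The utilities under test serialize tensors to disk: offload_state_dict writes one
# <name>.dat file per tensor plus an index.json describing each tensor's shape and dtype,
# and OffloadedWeightsLoader then merges in-memory and offloaded weights behind one mapping.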
class ModelForTest ( nn.Module ):
"""simple docstring"""
    def __init__( self ) -> Dict:
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , UpperCamelCase__ ) -> Union[str, Any]:
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(UpperCamelCase__ ) ) )
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_offload_state_dict( self ):
        '''simple docstring'''
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict() )
            index_file = os.path.join(tmp_dir , 'index.json' )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , F"""{key}.dat""" )
                self.assertTrue(os.path.isfile(weight_file ) )
# TODO: add tests on the fact weights are properly loaded
    def test_offload_weight( self ):
        '''simple docstring'''
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , 'weight' , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , 'weight.dat' )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {'weight': {'shape': [2, 3], 'dtype': str(dtype ).split('.' )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index['weight'] )
                self.assertTrue(torch.equal(weight , new_weight ) )
    def test_offloaded_weights_loader( self ):
        '''simple docstring'''
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        disk_part = {k: v for k, v in state_dict.items() if 'linear2' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if 'weight' in k}
        disk_part = {k: v for k, v in state_dict.items() if 'weight' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , state_dict )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
    def test_extract_submodules_state_dict( self ):
        '''simple docstring'''
        state_dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        extracted = extract_submodules_state_dict(state_dict , ['a.1', 'a.2'] )
        self.assertDictEqual(extracted , {'a.1': 0, 'a.2': 2} )
        state_dict = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        extracted = extract_submodules_state_dict(state_dict , ['a.1', 'a.2'] )
        self.assertDictEqual(extracted , {'a.1.a': 0, 'a.2.a': 2} )
| 142 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest ( TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        bart_tokenizer_path = os.path.join(self.tmpdirname , 'bart_tokenizer' )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
    def get_dpr_ctx_encoder_tokenizer( self ) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset( self ):
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever( self ):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever
    def get_dummy_custom_hf_index_retriever( self , from_disk : bool ):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname , 'dataset' )
            config.index_path = os.path.join(self.tmpdirname , 'index.faiss' )
            dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
            dataset.drop_index('embeddings' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , dataset ) , )
        return retriever
    def get_dummy_legacy_index_retriever( self ):
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
        dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
        pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
        passages_file = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
        passages = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
        pickle.dump(passages , open(passages_file , 'wb' ) )
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
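    # The three builders above cover the three index back-ends exercised in this file: the
    # canonical HF dataset index (with load_dataset patched), a CustomHFIndex kept in memory
    # or reloaded from disk, and the legacy pickled faiss index format.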
    def test_canonical_hf_index_retriever_retrieve( self ):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , n_docs )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained( self ):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname )
                retriever = RagRetriever.from_pretrained(tmp_dirname )
                self.assertIsInstance(retriever , RagRetriever )
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
                out = retriever.retrieve(hidden_states , n_docs=1 )
                self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve( self ):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , n_docs )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained( self ):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk( self ):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , n_docs )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk( self ):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve( self ):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['text'] ) , n_docs )
        self.assertEqual(doc_dicts[0]['text'][0] , 'bar' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['text'][0] , 'foo' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_hf_index_retriever_save_and_from_pretrained( self ):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call( self ):
        import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        context_input_ids , context_attention_mask , retrieved_doc_embeds = (
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , list )
        self.assertIsInstance(context_attention_mask , list )
        self.assertIsInstance(retrieved_doc_embeds , np.ndarray )
        out = retriever(
            question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs , return_tensors='pt' , )
        context_input_ids , context_attention_mask , retrieved_doc_embeds , doc_ids = (  # noqa: F841
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
            out['doc_ids'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , torch.Tensor )
        self.assertIsInstance(context_attention_mask , torch.Tensor )
        self.assertIsInstance(retrieved_doc_embeds , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_retriever_call_with_ctx_encoder( self ):
        ctx_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        retriever.set_ctx_encoder_tokenizer(ctx_encoder_tokenizer )
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        self.assertEqual(
            len(out ) , 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , True )  # check for doc token related keys in dictionary.
| 270 | 0 |
def exchange_sort( numbers : list[int] ) -> list[int]:
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i] , numbers[j] = numbers[j], numbers[i]
    return numbers
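# Exchange sort performs O(n^2) comparisons; e.g. exchange_sort([3, 1, 2]) yields [1, 2, 3].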
if __name__ == "__main__":
UpperCamelCase = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 717 |
from collections import Counter
from timeit import timeit
def lowerCamelCase_ ( _lowercase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
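# A string can be rearranged into a palindrome iff at most one character count is odd;
# e.g. "Momo" lowercases to Counter({'m': 2, 'o': 2}) with zero odd counts, so True.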
def lowerCamelCase_ ( _lowercase = "" ) -> bool:
if len(_lowercase ) == 0:
return True
__A : List[Any] = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__A : dict[str, int] = {}
for character in lower_case_input_str:
__A : str = character_freq_dict.get(_lowercase , 0 ) + 1
__A : Any = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowerCamelCase_ ( _lowercase = "" ) -> None:
print("\nFor string = " , _lowercase , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(_lowercase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(_lowercase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
UpperCamelCase = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
UpperCamelCase = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 387 | 0 |
"""simple docstring"""
def circle_sort( collection : list ):
    '''simple docstring'''
    if len(collection ) < 2:
        return collection
    def circle_sort_util(collection : list , low : int , high : int ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left] , collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left] , collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
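# Circle sort compares elements from opposite ends moving inward, then recurses on both
# halves; the outer loop above repeats whole passes until one completes with no swaps.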
if __name__ == "__main__":
_UpperCamelCase = input('Enter numbers separated by a comma:\n').strip()
_UpperCamelCase = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
| 179 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path : str , pytorch_dump_folder_path : str ):
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
        attributes = key.split('''.''' )
if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , attribute ) and len(attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowerCamelCase : str =old_model.weight
logger.info(F'{attribute} is initialized.' )
__lowerCamelCase : Any =True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowerCamelCase : Union[str, Any] =old_model.bias
logger.info(F'{attribute} is initialized' )
__lowerCamelCase : str =True
break
elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE , '''in_proj_weight''' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowerCamelCase : List[str] =nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowerCamelCase : str =nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowerCamelCase : List[str] =nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowerCamelCase : Tuple =nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowerCamelCase : Optional[Any] =nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowerCamelCase : int =nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowerCamelCase : Dict =True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F'{old_model} does not have {old_attribute}' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
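    # Example invocation (hypothetical paths, for illustration only):
    #   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
    #     --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
    #     --pytorch_dump_folder_path ./prophetnet_converted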
| 179 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """simple docstring"""
    def forward( self , input_ids , token_type_ids , attention_mask ):
        '''simple docstring'''
        return None
class FuncNonContiguousArgs:
    """simple docstring"""
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        '''simple docstring'''
        return None
class OnnxExportTestCase ( unittest.TestCase ):
    """simple docstring"""
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
    def test_export_tensorflow( self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'tf' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_pytorch( self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'pt' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_custom_bert_model( self ):
        '''simple docstring'''
        from transformers import BertModel
        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t' ) as vocab_file:
            vocab_file.write('\n'.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , 'pt' , 12 , tokenizer )
@require_tf
@slow
    def test_quantize_tf( self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'tf' , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
    def test_quantize_pytorch( self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'pt' , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        '''simple docstring'''
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework , model , path , opset , tokenizer , **model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch( self ):
        '''simple docstring'''
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'pt' )
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf( self ):
        '''simple docstring'''
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'tf' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        '''simple docstring'''
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
    def test_ensure_valid_input( self ):
        '''simple docstring'''
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['input_ids'] )
        self.assertEqual(ordered_input_names[0] , 'input_ids' )
    def test_generate_identified_name( self ):
        '''simple docstring'''
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
        self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
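    # quantize() derives its output path the same way, injecting a "-quantized" suffix
    # before the extension (which the size comparisons in the tests above rely on).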
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def save_model( model , dirpath ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy( p , unlogit=False ):
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
def print_ad_tensor( tensor ):
    logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        ((input_ids) , ) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(head_importance )
    logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
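# Head importance above is the accumulated absolute gradient of the loss w.r.t. each
# head-mask entry - a first-order estimate of how much masking that head would move the loss.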
def mask_heads( args , model , eval_dataloader ):
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
    logger.info('Final head mask' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
    return head_mask
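# mask_heads greedily zeroes the num_to_mask least-important heads per iteration until the
# 1/loss score drops below masking_threshold * original score, then returns the kept mask.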
def prune_heads( args , model , eval_dataloader , head_mask ):
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 1_00 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , score_masking , score_pruning )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
    save_model(model , args.output_dir )
def A__( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
        '--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.'
    )
    # Other parameters
    parser.add_argument(
        '--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name_or_path'
    )
    parser.add_argument(
        '--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name_or_path'
    )
    parser.add_argument(
        '--cache_dir', default=None, type=str, help='Where do you want to store the pre-trained models downloaded from s3'
    )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.'
    )
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory'
    )
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
    )
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        '--dont_normalize_global_importance', action='store_true', help="Don't normalize all importance scores between 0 and 1"
    )
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.'
    )
    parser.add_argument(
        '--masking_threshold', default=0.9, type=float, help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).'
    )
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Fraction of heads to mask at each masking step.'
    )
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length',
        default=128,
        type=int,
        help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ),
    )
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
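
# Usage sketch (added for illustration; not part of the original script). The head
# importance that `compute_heads_importance` accumulates is essentially the gradient
# of the loss w.r.t. a per-head mask. A minimal standalone version of that signal,
# assuming the public 'gpt2' checkpoint, looks like this:
#
# import torch
# from transformers import GPT2LMHeadModel, GPT2Tokenizer
#
# tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
# model = GPT2LMHeadModel.from_pretrained('gpt2')
# inputs = tokenizer('Head pruning keeps most of the accuracy.', return_tensors='pt')
# head_mask = torch.ones(model.config.n_layer, model.config.n_head, requires_grad=True)
# loss = model(**inputs, labels=inputs['input_ids'], head_mask=head_mask).loss
# loss.backward()
# importance = head_mask.grad.abs()  # larger gradient -> masking this head hurts the loss more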
| 652 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        """Test that stable diffusion img2img works with fp16"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
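
# Usage sketch (added for illustration; not part of the test file). End-to-end
# img2img with the public BAAI/AltDiffusion checkpoint, mirroring what the slow
# test above exercises:
#
# import torch
# from diffusers import AltDiffusionImg2ImgPipeline
# from diffusers.utils import load_image
#
# pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None).to("cuda")
# init_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/sketch-mountains-input.jpg"
# ).resize((768, 512))
# generator = torch.manual_seed(0)
# image = pipe(
#     prompt="A fantasy landscape, trending on artstation",
#     image=init_image,
#     strength=0.75,
#     guidance_scale=7.5,
#     generator=generator,
# ).images[0]
# image.save("fantasy_landscape.png")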
| 338 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
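
# Usage sketch (added for illustration; a hypothetical throwaway module, not part
# of the test file). `patch_submodule` swaps a dotted attribute as seen from inside
# one module, leaving the global `os` untouched:
import os
import types

my_module = types.ModuleType("my_module")
exec("import os", my_module.__dict__)  # give my_module its own view of `os`

def fake_join(*parts):
    return "<patched>"

with patch_submodule(my_module, "os.path.join", fake_join):
    assert my_module.os.path.join("a", "b") == "<patched>"
    assert os.path.join("a", "b") != "<patched>"  # the real os module is untouched
assert my_module.os.path.join("a", "b") == os.path.join("a", "b")  # restored on exit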
| 41 | 0 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)

        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map))

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
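
# Usage sketch (added for illustration; not part of the module). Project features
# to distribution parameters, then build an affine-transformed StudentT:
import torch

d_output = StudentTOutput(dim=1)
projection = d_output.get_parameter_projection(in_features=16)
features = torch.randn(8, 16)       # e.g. decoder hidden states
distr_args = projection(features)   # (df, loc, scale), each of shape (8,)
distribution = d_output.distribution(distr_args, loc=torch.zeros(8), scale=torch.ones(8))
sample = distribution.sample()      # shape (8,)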
| 305 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]


if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
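
# Usage note (added for illustration; not part of the file). With the `_LazyModule`
# pattern, importing the package is cheap; heavy submodules are only imported when
# one of their attributes is first resolved:
#
# from transformers import MobileNetV2Config, MobileNetV2Model  # the second name triggers the torch-backed import
#
# config = MobileNetV2Config()
# model = MobileNetV2Model(config)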
| 305 | 1 |
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d < `digit` for which 1/d contains the longest recurring
    cycle in its decimal fraction part (Project Euler problem 26).

    >>> solution(1, 10)
    7
    >>> solution(10, 100)
    97
    >>> solution(10, 1000)
    983
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
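
# Worked example (added for illustration): the inner loop tracks the successive
# remainders of the long division; for 1/7 they cycle through 1, 3, 2, 6, 4, 5
# before repeating, so the recurring decimal 0.(142857) has cycle length 6 - the
# longest for any d < 10.
assert solution(1, 10) == 7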
| 572 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)

        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
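
# Usage sketch (added for illustration; not part of the module). A small randomly
# initialized ControlNet - the block sizes here are arbitrary illustration values:
#
# import jax
# import jax.numpy as jnp
#
# controlnet = FlaxControlNetModel(
#     sample_size=16,
#     block_out_channels=(32, 64),
#     down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
#     cross_attention_dim=32,
# )
# params = controlnet.init_weights(rng=jax.random.PRNGKey(0))
#
# sample = jnp.zeros((1, 4, 16, 16))
# timesteps = jnp.ones((1,), dtype=jnp.int32)
# encoder_hidden_states = jnp.zeros((1, 1, 32))
# controlnet_cond = jnp.zeros((1, 3, 128, 128))  # 8x the latent resolution
#
# out = controlnet.apply({"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond)
# print(out.mid_block_res_sample.shape)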
| 590 | 0 |
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
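
# Worked example (added for illustration): the count follows the Fibonacci
# recurrence f(n) = f(n-1) + f(n-2) with f(1) = 1 and f(2) = 2, so three steps
# can be climbed 3 ways (1+1+1, 1+2, 2+1).
assert climb_stairs(3) == 3
assert climb_stairs(4) == 5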
| 325 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
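
# Usage sketch (added for illustration; not part of the module), with the public
# google/pegasus-xsum checkpoint:
#
# from transformers import PegasusTokenizer
#
# tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
# ids = tokenizer("PEGASUS reserves low ids for pretraining mask tokens.").input_ids
# print(ids[-1] == tokenizer.eos_token_id)  # True: build_inputs_with_special_tokens appends </s>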
| 325 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 1 |
import argparse
import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
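
# Usage sketch (added for illustration; the checkpoint/config paths are placeholders):
#
#     python conversion_script.py --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-pipeline
#
# after which the converted weights can be reloaded with
# `LDMPipeline.from_pretrained("./ldm-pipeline")`.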
| 165 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
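
# Layout sketch (added for illustration): unlike BERT, XLNet puts <sep> and <cls>
# at the END of the sequence, so a single sentence becomes
# `tokens + [<sep>] + [<cls>]` with token type ids `[0] * (len + 1) + [2]`
# (the cls segment id):
#
# from transformers import XLNetTokenizerFast
#
# tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# enc = tokenizer("Hello world")
# print(enc.input_ids[-2:] == [tokenizer.sep_token_id, tokenizer.cls_token_id])  # True
# print(enc.token_type_ids[-1])  # 2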
| 721 |
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """
    Schur complement of the symmetric block matrix [[A, B], [B.T, C]] with respect
    to the upper-left block A, i.e. C - B.T @ A^{-1} @ B.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        raise ValueError(
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )

    if shape_b[1] != shape_c[1]:
        raise ValueError(
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        # c has 2 rows while a has 3, so passing it as the B block trips the row check
        with self.assertRaises(ValueError):
            schur_complement(a, c, b)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
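
# Worked identity (added for illustration): for M = [[A, B], [B.T, C]] with
# invertible A, det(M) = det(A) * det(C - B.T @ A^{-1} @ B), which is exactly
# what the first test above verifies numerically. A hand-checkable instance:
a = np.array([[2.0, 0.0], [0.0, 2.0]])
b = np.array([[1.0], [0.0]])
c = np.array([[3.0]])
s = schur_complement(a, b, c)                      # 3 - 0.5 = 2.5
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))  # 10 == 4 * 2.5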
| 30 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __A :
def __init__( self :Tuple , __snake_case :int , __snake_case :int=14 , __snake_case :Tuple=7 , __snake_case :Any=True , __snake_case :Dict=True , __snake_case :Any=True , __snake_case :Optional[Any]=True , __snake_case :Optional[Any]=True , __snake_case :Dict=99 , __snake_case :Any=32 , __snake_case :int=5 , __snake_case :Any=4 , __snake_case :str=37 , __snake_case :List[Any]="gelu" , __snake_case :str=0.1 , __snake_case :str=0.1 , __snake_case :str=5_12 , __snake_case :int=16 , __snake_case :Optional[Any]=2 , __snake_case :Dict=0.02 , __snake_case :Tuple=3 , __snake_case :List[str]=4 , __snake_case :Any=None , ):
'''simple docstring'''
__magic_name__ : int =parent
__magic_name__ : str =batch_size
__magic_name__ : str =seq_length
__magic_name__ : Tuple =is_training
__magic_name__ : List[str] =use_token_type_ids
__magic_name__ : Tuple =use_input_mask
__magic_name__ : Any =use_labels
__magic_name__ : str =use_mc_token_ids
__magic_name__ : List[Any] =vocab_size
__magic_name__ : List[str] =hidden_size
__magic_name__ : Tuple =num_hidden_layers
__magic_name__ : Union[str, Any] =num_attention_heads
__magic_name__ : Tuple =intermediate_size
__magic_name__ : Optional[int] =hidden_act
__magic_name__ : Tuple =hidden_dropout_prob
__magic_name__ : str =attention_probs_dropout_prob
__magic_name__ : Dict =max_position_embeddings
__magic_name__ : Dict =type_vocab_size
__magic_name__ : Any =type_sequence_label_size
__magic_name__ : Optional[Any] =initializer_range
__magic_name__ : Optional[Any] =num_labels
__magic_name__ : Any =num_choices
__magic_name__ : List[str] =scope
__magic_name__ : Optional[Any] =self.vocab_size - 1
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Any =None
if self.use_input_mask:
__magic_name__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : Any =None
if self.use_token_type_ids:
__magic_name__ : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Tuple =None
if self.use_mc_token_ids:
__magic_name__ : Tuple =ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__magic_name__ : Optional[Any] =None
__magic_name__ : str =None
__magic_name__ : List[str] =None
if self.use_labels:
__magic_name__ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Optional[int] =self.get_config()
__magic_name__ : List[Any] =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def A__ ( self :List[Any] ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def A__ ( self :int , __snake_case :Dict , __snake_case :Optional[Any] , __snake_case :Tuple , __snake_case :Dict , __snake_case :Dict , *__snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case , token_type_ids=__snake_case , head_mask=__snake_case )
model(__snake_case , token_type_ids=__snake_case )
__magic_name__ : int =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def A__ ( self :Dict , __snake_case :Tuple , __snake_case :str , __snake_case :Dict , __snake_case :Tuple , __snake_case :List[Any] , *__snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : str =CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Any =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
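
# For context: `ids_tensor` and `random_attention_mask` above come from transformers'
# shared test utilities. A minimal sketch of what an `ids_tensor`-style helper does
# (an approximation for illustration, not the library's exact implementation):
def ids_tensor_sketch(shape, vocab_size):
    # Uniform random token ids in [0, vocab_size), matching the calls above.
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)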
| 21 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
@require_multi_gpu
    def test_align_devices_in_hook(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 21 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
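
# Quick sanity check of the shapes the tester above expects with its defaults
# (my arithmetic, not part of the test suite):
batch_size, last_hidden_size = 13, int(1024 * 0.25)  # 256 channels after the depth multiplier
spatial = 32 // 32  # image_size // output_stride == 1
assert (batch_size, last_hidden_size, spatial, spatial) == (13, 256, 1, 1)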
| 133 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
@torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0

                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)

                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
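
# A minimal smoke test of the decoder above (hypothetical sizes; instantiating it
# builds a full GPT-2, so this is illustrative rather than cheap to run):
#
#     import torch
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64)
#     input_ids = torch.randint(0, 50257, (2, 10))
#     prefix_embeds = torch.randn(2, 77, 768)
#     out, hidden = decoder(input_ids, prefix_embeds)  # hidden is returned because prefix_hidden_dim is set
#     print(out.logits.shape)  # torch.Size([2, 87, 50257]) -- 77 prefix + 10 text positions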
| 133 | 1 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
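
# Example run (deterministic because probability is 0): three cars spaced every
# 3 cells accelerate to speed 1, then stay capped by the one-cell-per-step gap
# to the car ahead (hand-traced, treat as a sketch):
#
#     >>> simulate(construct_highway(12, 3, 0), 2, 0.0, 2)
#     [[0, -1, -1, 0, -1, -1, 0, -1, -1, 0, -1, -1],
#      [-1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1],
#      [-1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1]]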
| 71 | '''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Implements the odd-even transposition sort and returns the sorted list."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
__lowerCAmelCase : List[str] = [int(x) for x in input().split()]
# inputing elements of the list in one line
__lowerCAmelCase : Dict = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
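
# Quick check of the sort on a small list:
#
#     >>> odd_even_sort([5, 3, 1, 4, 2])
#     [1, 2, 3, 4, 5]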
| 262 | 0 |
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
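
# Loading and calling the metric, mirroring the docstring above (assumes the
# `datasets` library and this script are available locally):
#
#     import datasets
#
#     cuad_metric = datasets.load_metric("cuad")
#     predictions = [{"id": "q0", "prediction_text": ["The seller:"]}]
#     references = [{"id": "q0", "answers": {"text": ["The seller:"], "answer_start": [143]}}]
#     print(cuad_metric.compute(predictions=predictions, references=references))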
| 504 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    # A block continues while lines stay indented, are nearly empty, or close a signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
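
# What the checker parses: a marker comment above a copied block, optionally with
# `X->Y` rename patterns. A made-up example of the convention (class names are
# hypothetical):
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#     class MyBlock(nn.Module):
#         ...
#
# `_re_copy_warning` captures the indent, the dotted object name after
# `diffusers.`, and the trailing replace pattern; `is_copy_consistent` then
# re-renders the original through `blackify` and diffs it against the block.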
| 504 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__A = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
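
# With the lazy pattern above, submodules are imported only on first attribute
# access, e.g. (assuming the optional torch dependency is installed so the
# modeling branch is populated):
#
#     from transformers.models.clap import ClapProcessor  # triggers processing_clap import only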
| 68 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
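
# The slice checked above, sample[-1, -2:, -2:, :2], is 2 * 2 * 2 = 8 values,
# which is why each parameterized case carries an 8-element expected slice:
#
#     import numpy as np
#
#     x = np.zeros((4, 4, 64, 64))
#     assert x[-1, -2:, -2:, :2].flatten().shape == (8,)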
| 471 | 0 |
from __future__ import annotations
class Node:
    """A Node has a data field and pointers to Nodes to its left and right."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
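# Quick illustration of the helpers (not part of the original module): a node
# with a single child makes the tree non-full, while depth still counts it.
#
#   small = Node(1)
#   small.left = Node(2)
#   depth_of_tree(small)        # -> 2
#   is_full_binary_tree(small)  # -> False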
| 705 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or an already-parsed ``Version``) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed torch version against ``version`` using ``operation``."""
    return compare_versions(torch_version, operation, version)
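# Usage sketch (not part of the original module; assumes the named packages
# are installed):
#
#   is_torch_version(">=", "1.12.0")       # compares the installed torch
#   compare_versions("numpy", "<", "2.0")  # compares any installed library
#
# Both return the bool produced by the packaging.version comparator looked up
# in STR_OPERATION_TO_FUNC.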
| 607 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)

    def run(self) -> dict:
        hub_version = huggingface_hub.__version__

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f"{pt_version} ({pt_cuda_available})",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 251 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
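# Note: `_import_structure` above is only a declaration. `_LazyModule` (bound
# into `sys.modules` at the bottom of this file) resolves each listed symbol on
# first attribute access, so importing the package does not pull in torch until
# a torch-backed class is actually touched. Effect sketch (assuming a torch
# install):
#
#   from transformers.models.bigbird_pegasus import BigBirdPegasusModel
#   # -> modeling_bigbird_pegasus is imported only at this point.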
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 703 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a word's sorted-letter signature; anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list that is an anagram of ``my_word``."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
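# Example of the two helpers on this word list (exact output depends on
# words.txt; "pots" is illustrative):
#
#   signature("pots")  # -> "opst"
#   anagram("pots")    # -> e.g. ['opts', 'post', 'pots', 'spot', 'stop', 'tops']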
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 0 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's Law (V = I * R): given two known quantities and the unknown
    passed as 0, return the missing quantity in a one-entry dict."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance < 0:
        raise ValueError('Resistance cannot be negative')
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('Exactly one argument must be 0')
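# Worked examples of V = I * R with the unknown passed as 0:
#
#   ohms_law(voltage=10, current=0, resistance=5)   # -> {'current': 2.0}
#   ohms_law(voltage=0, current=0.5, resistance=2)  # -> {'voltage': 1.0}
#   ohms_law(voltage=2, current=4, resistance=0)    # -> {'resistance': 0.5}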
if __name__ == "__main__":
import doctest
doctest.testmod() | 57 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #             ^ unk: 2 + 1 = 3                 unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ],
        )
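    # Note on the offsets above: XLMRobertaTokenizer prepends the fairseq
    # specials ("<s>", "<pad>", "</s>", "<unk>") to the SentencePiece vocab, so
    # every SentencePiece id is shifted by ``tokenizer.fairseq_offset`` (1 here)
    # and unknown pieces resolve to id 3 ("<unk>") rather than SentencePiece's 0.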
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[Any] ={'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 464 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
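    # Downstream sketch (not part of the test): the checked tensor is the
    # per-token encoder output of shape (batch, seq_len, hidden) = (1, 6, 768);
    # a simple sentence embedding would mean-pool it over the sequence axis:
    #
    #   sentence_embedding = tf.reduce_mean(output, axis=1)  # shape (1, 768)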
| 35 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
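# Standalone sketch of the dynamic-padding idea in `collate_fn` above (toy
# inputs, not part of the example; exact shapes depend on the tokenizer):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-cased")
#   features = [tok("short"), tok("a slightly longer sentence")]
#   batch = tok.pad(features, padding="longest", pad_to_multiple_of=8, return_tensors="pt")
#   # every tensor in `batch` is padded to the longest example, rounded up to a multiple of 8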
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
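# Typical invocations, assuming this script is saved as local_sgd.py:
#
#   python local_sgd.py --local_sgd_steps 8
#   accelerate launch local_sgd.py --mixed_precision fp16 --local_sgd_steps 8
#
# `accelerate launch` applies the distributed setup created by `accelerate
# config`; the remaining flags map onto the argparse options defined in main().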
| 35 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """A list of processors/warpers with a `__call__` that applies each one in turn to `scores`."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] performing top-p (nucleus) filtering: keep the smallest set of tokens whose
    cumulative probability is >= `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] performing top-k filtering: keep only the k highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
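# Toy illustration of the top-k warper (not part of the module): with
# top_k=2, every logit outside the two largest becomes `filter_value`.
#
#   warper = FlaxTopKLogitsWarper(top_k=2)
#   scores = jnp.array([[0.0, 3.0, 1.0, 2.0]])
#   warper(None, scores, cur_len=1)  # -> [[-inf, 3.0, -inf, 2.0]]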
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the last one when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by masking the EOS token until it is reached."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces specific token ids at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is beyond the forced-token array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (non-negative) tokens are forced.
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Whisper-specific processor: forces timestamp tokens to come in pairs and prefers timestamps
    when their total probability mass dominates the text tokens."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),  # has to be a non-timestamp
                    scores_k.at[: self.eos_token_id].set(-float("inf")),  # cannot be a normal text token
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
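# Composition sketch, mirroring how the Flax generation loop applies these
# (not part of the module):
#
#   processors = FlaxLogitsProcessorList(
#       [
#           FlaxTemperatureLogitsWarper(0.7),
#           FlaxTopKLogitsWarper(top_k=50),
#           FlaxTopPLogitsWarper(top_p=0.9),
#       ]
#   )
#   scores = processors(input_ids, scores, cur_len)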
| 82 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
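# Fire turns the function signature into a CLI; a hypothetical invocation:
#
#   python save_len_file.py t5-small /path/to/data_dir --max_source_length 512
#
# This pickles the per-example max token lengths for the train and val splits
# next to the datasets (at each dataset's `len_file` path).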
| 54 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 720 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 278 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish",
        conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02,
        is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25,
        ffn_dropout=0.0, attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )
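    # Note: with the defaults above, width_multiplier=0.25 gives
    # make_divisible(512 * 0.25, divisor=8) == 128, so the tester's
    # last_hidden_size is already a multiple of 8 and is left unchanged.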
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def _A ( self : int ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def _A ( self : int ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
@slow
def _A ( self : Dict ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : List[str] = MobileViTVaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _A ( self : Tuple ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def _A ( self : str ):
lowerCAmelCase : Any = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
lowerCamelCase__ )
lowerCAmelCase : Tuple = self.default_image_processor
lowerCAmelCase : List[Any] = prepare_img()
lowerCAmelCase : Tuple = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase : str = model(**lowerCamelCase__ )
# verify the logits
lowerCAmelCase : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
lowerCAmelCase : List[str] = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def _A ( self : str ):
lowerCAmelCase : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase : Any = model.to(lowerCamelCase__ )
lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase : List[Any] = prepare_img()
lowerCAmelCase : List[str] = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase : List[Any] = model(**lowerCamelCase__ )
lowerCAmelCase : Any = outputs.logits
# verify the logits
lowerCAmelCase : Optional[int] = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , lowerCamelCase__ )
lowerCAmelCase : Any = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=lowerCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def _A ( self : Optional[Any] ):
lowerCAmelCase : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase : str = model.to(lowerCamelCase__ )
lowerCAmelCase : Dict = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase : Optional[int] = prepare_img()
lowerCAmelCase : str = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Tuple = model(**lowerCamelCase__ )
lowerCAmelCase : Optional[Any] = outputs.logits.detach().cpu()
lowerCAmelCase : str = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(5_0, 6_0)] )
lowerCAmelCase : str = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
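# A self-contained sketch of the shape arithmetic the hidden-states test above
# relies on: MobileViTV2 emits 5 feature maps whose spatial size starts at
# image_size // 2 and halves at every stage, so the final divisor is twice the
# model's output stride. Values below are illustrative, not model outputs.
def expected_feature_map_sizes(image_size, num_stages=5):
    sizes, divisor = [], 2
    for _ in range(num_stages):
        sizes.append(image_size // divisor)
        divisor *= 2
    return sizes, divisor // 2  # (per-stage sizes, implied output stride)
assert expected_feature_map_sizes(256) == ([128, 64, 32, 16, 8], 32)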
| 348 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCamelCase_ ( __UpperCamelCase ):
"""simple docstring"""
A = '''char'''
A = '''bpe'''
A = '''wp'''
_a : str = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCamelCase_ ( __UpperCamelCase ):
"""simple docstring"""
A = ['''image_processor''', '''char_tokenizer''']
A = '''ViTImageProcessor'''
A = '''MgpstrTokenizer'''
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ):
__lowerCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCAmelCase , )
__lowerCamelCase = kwargs.pop("""feature_extractor""" )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
__lowerCamelCase = tokenizer
__lowerCamelCase = AutoTokenizer.from_pretrained("""gpt2""" )
__lowerCamelCase = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ):
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
__lowerCamelCase = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None:
__lowerCamelCase = self.char_tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase = encodings["""input_ids"""]
return inputs
def lowerCamelCase_ ( self , UpperCAmelCase ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = sequences
__lowerCamelCase = char_preds.size(0 )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(UpperCAmelCase , """char""" )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(UpperCAmelCase , """bpe""" )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(UpperCAmelCase , """wp""" )
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(UpperCAmelCase ):
__lowerCamelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
__lowerCamelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
__lowerCamelCase = scores.index(max(UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__lowerCamelCase = {}
__lowerCamelCase = final_strs
__lowerCamelCase = final_scores
__lowerCamelCase = char_strs
__lowerCamelCase = bpe_strs
__lowerCamelCase = wp_strs
return out
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase ):
if format == DecodeType.CHARACTER:
__lowerCamelCase = self.char_decode
__lowerCamelCase = 1
__lowerCamelCase = """[s]"""
elif format == DecodeType.BPE:
__lowerCamelCase = self.bpe_decode
__lowerCamelCase = 2
__lowerCamelCase = """#"""
elif format == DecodeType.WORDPIECE:
__lowerCamelCase = self.wp_decode
__lowerCamelCase = 1_0_2
__lowerCamelCase = """[SEP]"""
else:
raise ValueError(f'''Format {format} is not supported.''' )
__lowerCamelCase , __lowerCamelCase = [], []
__lowerCamelCase = pred_logits.size(0 )
__lowerCamelCase = pred_logits.size(1 )
__lowerCamelCase , __lowerCamelCase = pred_logits.topk(1 , dim=-1 , largest=UpperCAmelCase , sorted=UpperCAmelCase )
__lowerCamelCase = preds_index.view(-1 , UpperCAmelCase )[:, 1:]
__lowerCamelCase = decoder(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase = torch.nn.functional.softmax(UpperCAmelCase , dim=2 ).max(dim=2 )
__lowerCamelCase = preds_max_prob[:, 1:]
for index in range(UpperCAmelCase ):
__lowerCamelCase = preds_str[index].find(UpperCAmelCase )
__lowerCamelCase = preds_str[index][:pred_eos]
__lowerCamelCase = preds_index[index].cpu().tolist()
__lowerCamelCase = pred_index.index(UpperCAmelCase ) if eos_token in pred_index else -1
__lowerCamelCase = preds_max_prob[index][: pred_eos_index + 1]
__lowerCamelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCAmelCase )
conf_scores.append(UpperCAmelCase )
return dec_strs, conf_scores
def lowerCamelCase_ ( self , UpperCAmelCase ):
__lowerCamelCase = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(UpperCAmelCase )]
return decode_strs
def lowerCamelCase_ ( self , UpperCAmelCase ):
return self.bpe_tokenizer.batch_decode(UpperCAmelCase )
def lowerCamelCase_ ( self , UpperCAmelCase ):
__lowerCamelCase = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(UpperCAmelCase )]
return decode_strs
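# A self-contained sketch of the selection rule in batch_decode above: for each
# sample, keep the decoded string whose head (char / bpe / wordpiece) produced
# the highest confidence. The inputs below are made up purely for illustration.
def pick_best(strs_per_head, scores_per_head):
    best_strs, best_scores = [], []
    for strs, scores in zip(zip(*strs_per_head), zip(*scores_per_head)):
        best = scores.index(max(scores))
        best_strs.append(strs[best])
        best_scores.append(scores[best])
    return best_strs, best_scores
_strs = [["cat", "act"], ["dog", "dgo"]]    # [head][sample]
_scores = [[0.9, 0.2], [0.4, 0.8]]
assert pick_best(_strs, _scores) == (["cat", "dgo"], [0.9, 0.8])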
| 479 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def lowerCamelCase_ ( lowerCAmelCase: Accelerator , lowerCAmelCase: int = 16 , lowerCAmelCase: str = "bert-base-cased" )-> Tuple:
_snake_case : str = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
_snake_case : Optional[int] = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowerCAmelCase: str ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_snake_case : Optional[Any] = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case : Tuple = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCAmelCase: List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase__ , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_snake_case : Optional[int] = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
_snake_case : str = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: Dict )-> Tuple:
model.eval()
_snake_case : List[str] = 0
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
_snake_case : int = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_snake_case : List[Any] = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase__ ) - 1:
_snake_case : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_snake_case : List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
_snake_case : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> List[str]:
# Initialize accelerator
_snake_case : Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : List[Any] = config["""lr"""]
_snake_case : int = int(config['num_epochs'] )
_snake_case : Dict = int(config['seed'] )
_snake_case : Optional[Any] = int(config['batch_size'] )
_snake_case : Tuple = args.model_name_or_path
set_seed(UpperCAmelCase__ )
_snake_case : Tuple = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case : int = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
# Instantiate optimizer
_snake_case : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_snake_case : str = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
_snake_case : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_snake_case : List[Any] = 1
_snake_case : List[str] = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_snake_case : Tuple = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , )
else:
_snake_case : str = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case : Optional[int] = accelerator.prepare(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
_snake_case : Any = 0
# We also need to keep track of the stating epoch so files are named properly
_snake_case : Optional[Any] = 0
_snake_case : Union[str, Any] = evaluate.load('glue' , 'mrpc' )
_snake_case : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
_snake_case : List[str] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_snake_case : Optional[int] = args.resume_from_checkpoint.split('epoch_' )[1]
_snake_case : str = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_snake_case : Union[str, Any] = int(UpperCAmelCase__ ) + 1
_snake_case : Optional[Any] = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
accelerator.print('resumed checkpoint performance:' , UpperCAmelCase__ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , 'r' ) as f:
_snake_case : Any = json.load(UpperCAmelCase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_snake_case : Optional[int] = {}
for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ):
model.train()
for step, batch in enumerate(UpperCAmelCase__ ):
_snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
_snake_case : Dict = outputs.loss
_snake_case : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_snake_case : List[str] = F"""epoch_{epoch}"""
_snake_case : Dict = os.path.join(args.output_dir , UpperCAmelCase__ )
accelerator.save_state(UpperCAmelCase__ )
_snake_case : Dict = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
_snake_case : Optional[int] = accuracy
_snake_case : List[str] = lr_scheduler.get_lr()[0]
_snake_case : List[str] = optimizer.param_groups[0]["""lr"""]
_snake_case : List[str] = epoch
_snake_case : List[str] = overall_step
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , 'w' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( )-> str:
_snake_case : Optional[int] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase__ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase__ , default=2 , help='Number of train epochs.' , )
_snake_case : Dict = parser.parse_args()
_snake_case : Tuple = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
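# A compact sketch of the epoch parsing done in training_function above when
# resuming: the checkpoint folder is named "epoch_<n>", so the next starting
# epoch is the parsed number plus one. This regex version is equivalent to the
# character-by-character loop used in the script.
import re
def epoch_from_checkpoint(path):
    match = re.search(r"epoch_(\d+)", path)
    if match is None:
        raise ValueError(f"no epoch number found in {path!r}")
    return int(match.group(1))
assert epoch_from_checkpoint("output/epoch_7") + 1 == 8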
| 708 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
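# A tiny demonstration of the copy pattern above: rebuilding an instance from a
# field-wise deepcopy so mutable values are not shared between the original and
# the copy. _Cfg is a hypothetical stand-in class, not part of the library.
import copy
from dataclasses import dataclass, field
@dataclass
class _Cfg:
    tags: list = field(default_factory=list)
_original = _Cfg(tags=["a"])
_clone = _Cfg(**{k: copy.deepcopy(v) for k, v in _original.__dict__.items()})
_clone.tags.append("b")
assert _original.tags == ["a"]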
| 669 | 0 |
'''simple docstring'''
from itertools import count
def lowercase (_A = 5_0 ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = [1] * min_block_length
for n in count(_A ):
fill_count_functions.append(1 )
for block_length in range(_A , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
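# A hand-checkable restatement of the recurrence above (Project Euler 114/115):
# F(n) counts ways to fill a row of length n with blocks of length >= m that
# are separated by at least one empty cell. Written self-contained for clarity.
def fill_count(n, m):
    counts = [1] * m  # rows shorter than m can only stay empty
    for length in range(m, n + 1):
        total = 1  # the all-empty row
        for block_length in range(m, length + 1):
            for block_start in range(length - block_length):
                total += counts[length - block_start - block_length - 1]
            total += 1  # block flush against the right edge
        counts.append(total)
    return counts[n]
assert fill_count(7, 3) == 17  # the worked example given in Project Euler 114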
| 444 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCAmelCase : Tuple = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "rag"
__magic_name__ = True
def __init__( self , snake_case__=None , snake_case__=True , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=" / " , snake_case__=" // " , snake_case__=5 , snake_case__=300 , snake_case__=768 , snake_case__=8 , snake_case__="wiki_dpr" , snake_case__="train" , snake_case__="compressed" , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=0.0 , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=True , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
bos_token_id=snake_case__ , pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , prefix=snake_case__ , vocab_size=snake_case__ , **snake_case__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_lowerCAmelCase : Optional[Any] = kwargs.pop('question_encoder' )
_lowerCAmelCase : List[Any] = question_encoder_config.pop('model_type' )
_lowerCAmelCase : List[str] = kwargs.pop('generator' )
_lowerCAmelCase : Tuple = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase : Union[str, Any] = AutoConfig.for_model(snake_case__ , **snake_case__ )
_lowerCAmelCase : List[str] = AutoConfig.for_model(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[int] = reduce_loss
_lowerCAmelCase : Optional[Any] = label_smoothing
_lowerCAmelCase : Dict = exclude_bos_score
_lowerCAmelCase : Optional[int] = do_marginalize
_lowerCAmelCase : List[str] = title_sep
_lowerCAmelCase : Optional[Any] = doc_sep
_lowerCAmelCase : str = n_docs
_lowerCAmelCase : Optional[int] = max_combined_length
_lowerCAmelCase : Dict = dataset
_lowerCAmelCase : Optional[int] = dataset_split
_lowerCAmelCase : str = index_name
_lowerCAmelCase : Tuple = retrieval_vector_size
_lowerCAmelCase : Any = retrieval_batch_size
_lowerCAmelCase : Any = passages_path
_lowerCAmelCase : Tuple = index_path
_lowerCAmelCase : Any = use_dummy_dataset
_lowerCAmelCase : Union[str, Any] = output_retrieved
_lowerCAmelCase : List[str] = do_deduplication
_lowerCAmelCase : Optional[int] = use_cache
if self.forced_eos_token_id is None:
_lowerCAmelCase : Tuple = getattr(self.generator , 'forced_eos_token_id' , snake_case__ )
@classmethod
def a ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : List[str] = self.question_encoder.to_dict()
_lowerCAmelCase : Optional[Any] = self.generator.to_dict()
_lowerCAmelCase : Optional[int] = self.__class__.model_type
return output
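# A minimal offline sketch of composing this config from two sub-configs; in
# the upstream transformers API the class is RagConfig (with a
# from_question_encoder_generator_configs classmethod). `to_dict()` keeps the
# `model_type` key that __init__ pops before dispatching to AutoConfig.for_model.
# BERT and BART are illustrative choices here.
from transformers import BartConfig, BertConfig, RagConfig
rag_config = RagConfig(
    question_encoder=BertConfig().to_dict(),
    generator=BartConfig().to_dict(),
    n_docs=5,
)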
| 444 | 1 |
"""simple docstring"""
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 0
while number > 0:
SCREAMING_SNAKE_CASE__ = number % 10
sum_of_digits += last_digit
SCREAMING_SNAKE_CASE__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def A ( snake_case__ = 1_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = factorial(snake_case__ )
SCREAMING_SNAKE_CASE__ = split_and_add(snake_case__ )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
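# A quick hand check of the factorial digit-sum idea above, written
# self-contained because the three defs above all share the name A:
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
from math import factorial as _factorial
def _digit_sum(number):
    return sum(int(digit) for digit in str(number))
assert _digit_sum(_factorial(10)) == 27
assert _digit_sum(_factorial(100)) == 648  # the Project Euler 20 answer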
| 616 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase (unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = ort.SessionOptions()
SCREAMING_SNAKE_CASE__ = False
return options
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE__ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """A red cat sitting on a park bench"""
SCREAMING_SNAKE_CASE__ = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=__UpperCAmelCase , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 616 | 1 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Dict = config_class
SCREAMING_SNAKE_CASE : Optional[Any] = has_text_modality
SCREAMING_SNAKE_CASE : List[str] = kwargs
SCREAMING_SNAKE_CASE : List[Any] = common_properties
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : int = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE : Dict = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) , msg=f'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(UpperCAmelCase_ ):
try:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
self.parent.assertEqual(
getattr(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ , msg=f'''`{name} value {idx} expected, but was {getattr(UpperCAmelCase_ , UpperCAmelCase_ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCAmelCase_ ):
try:
SCREAMING_SNAKE_CASE : List[str] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ , msg=f'''`{name} value {idx} expected, but was {getattr(UpperCAmelCase_ , UpperCAmelCase_ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE : Any = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase_ )
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Dict = os.path.join(UpperCAmelCase_ , "config.json" )
config_first.to_json_file(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.config_class.from_json_file(UpperCAmelCase_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.config_class.from_pretrained(UpperCAmelCase_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE : Dict = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
config_first.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.config_class.from_pretrained(UpperCAmelCase_ , subfolder=UpperCAmelCase_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
SCREAMING_SNAKE_CASE : List[Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _A ( self : str ):
if self.config_class.is_composition:
return
SCREAMING_SNAKE_CASE : Optional[int] = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase_ )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = self.config_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase_ , UpperCAmelCase_ ) != value:
wrong_values.append((key, getattr(UpperCAmelCase_ , UpperCAmelCase_ ), value) )
if len(UpperCAmelCase_ ) > 0:
SCREAMING_SNAKE_CASE : Any = "\n".join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )
def _A ( self : Tuple ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
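# A self-contained sketch of the save/load round-trip this tester exercises,
# using BertConfig as a stand-in config class; it runs fully offline and
# mirrors the assertion made in create_and_test_config_from_and_save_pretrained.
import tempfile
from transformers import BertConfig
_config_first = BertConfig(hidden_size=32)
with tempfile.TemporaryDirectory() as _tmpdirname:
    _config_first.save_pretrained(_tmpdirname)
    _config_second = BertConfig.from_pretrained(_tmpdirname)
assert _config_second.to_dict() == _config_first.to_dict()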
| 62 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : List[Any] , **UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
requires_backends(self , "vision")
requires_backends(self , "torch")
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
self.check_model_type(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple , **UpperCAmelCase_ : List[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] ={}
lowerCamelCase__: Tuple ={}
lowerCamelCase__: str ={}
# preprocess args
if "points_per_batch" in kwargs:
lowerCamelCase__: Optional[Any] =kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
lowerCamelCase__: int =kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
lowerCamelCase__: Any =kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
lowerCamelCase__: Tuple =kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
lowerCamelCase__: List[Any] =kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
lowerCamelCase__: List[str] =kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
lowerCamelCase__: int =kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
lowerCamelCase__: Optional[int] =kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
lowerCamelCase__: str =kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
lowerCamelCase__: Any =kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
lowerCamelCase__: List[Any] =kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
lowerCamelCase__: List[str] =kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self : int , UpperCAmelCase_ : Dict , *UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : float = 512 / 1_500 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 1 , ) ->Dict:
'''simple docstring'''
lowerCamelCase__: Dict =load_image(UpperCAmelCase_)
lowerCamelCase__: List[str] =self.image_processor.size["longest_edge"]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: str =self.image_processor(images=UpperCAmelCase_ , return_tensors="pt")
with self.device_placement():
if self.framework == "pt":
lowerCamelCase__: str =self.get_inference_context()
with inference_context():
lowerCamelCase__: Union[str, Any] =self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device)
lowerCamelCase__: Optional[Any] =self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
lowerCamelCase__: str =image_embeddings
lowerCamelCase__: int =grid_points.shape[1]
lowerCamelCase__: int =points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None")
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: int =grid_points[:, i : i + points_per_batch, :, :]
lowerCamelCase__: Optional[Any] =input_labels[:, i : i + points_per_batch]
lowerCamelCase__: Dict =i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=0.88 , UpperCAmelCase_ : Optional[Any]=0.95 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : Any=1 , ) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =model_inputs.pop("input_boxes")
lowerCamelCase__: Dict =model_inputs.pop("is_last")
lowerCamelCase__: int =model_inputs.pop("original_sizes").tolist()
lowerCamelCase__: Union[str, Any] =model_inputs.pop("reshaped_input_sizes").tolist()
lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_)
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowerCamelCase__: Optional[int] =model_outputs["pred_masks"]
lowerCamelCase__: Union[str, Any] =self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =model_outputs["iou_scores"]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Optional[int]=0.7 , ) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Any =[]
lowerCamelCase__: Optional[int] =[]
lowerCamelCase__: List[str] =[]
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores"))
all_masks.extend(model_output.pop("masks"))
all_boxes.append(model_output.pop("boxes"))
lowerCamelCase__: str =torch.cat(UpperCAmelCase_)
lowerCamelCase__: List[str] =torch.cat(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[str] =defaultdict(UpperCAmelCase_)
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_)
lowerCamelCase__: Any ={}
if output_rle_mask:
lowerCamelCase__: Union[str, Any] =rle_mask
if output_bboxes_mask:
lowerCamelCase__: int =bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 59 | 0 |
def __lowercase ( _UpperCAmelCase ) -> bool:
'''simple docstring'''
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
__lowercase = 4
__lowercase = (1 << p) - 1
for _ in range(p - 2 ):
__lowercase = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
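# A sanity sweep using the original function name (lucas_lehmer_test, as in the
# calls above): among primes p <= 31, 2**p - 1 is a Mersenne prime exactly for
# p in {2, 3, 5, 7, 13, 17, 19, 31}.
_accepted = [p for p in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31) if lucas_lehmer_test(p)]
assert _accepted == [2, 3, 5, 7, 13, 17, 19, 31]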
| 576 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> list[float]:
'''simple docstring'''
__lowercase , __lowercase = coefficient_matrix.shape
__lowercase , __lowercase = constant_matrix.shape
if rowsa != colsa:
__lowercase = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(_UpperCAmelCase )
if colsa != 1:
__lowercase = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(_UpperCAmelCase )
if rowsa != rowsa:
__lowercase = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(_UpperCAmelCase )
if len(_UpperCAmelCase ) != rowsa:
__lowercase = (
"Number of initial values must be equal to number of rows in coefficient "
f'''matrix but received {len(_UpperCAmelCase )} and {rowsa}'''
)
raise ValueError(_UpperCAmelCase )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
__lowercase = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__lowercase , __lowercase = table.shape
strictly_diagonally_dominant(_UpperCAmelCase )
# Iterates the whole matrix for given number of times
for _ in range(_UpperCAmelCase ):
__lowercase = []
for row in range(_UpperCAmelCase ):
__lowercase = 0
for col in range(_UpperCAmelCase ):
if col == row:
__lowercase = table[row][col]
elif col == cols - 1:
__lowercase = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__lowercase = (temp + val) / denom
new_val.append(_UpperCAmelCase )
__lowercase = new_val
return [float(_UpperCAmelCase ) for i in new_val]
def __lowercase ( _UpperCAmelCase ) -> bool:
'''simple docstring'''
__lowercase , __lowercase = table.shape
__lowercase = True
for i in range(0 , _UpperCAmelCase ):
__lowercase = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
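# A worked example under the method's convergence condition (strict diagonal
# dominance), using the upstream name jacobi_iteration_method for the solver
# defined above. The exact solution of [[4, 1], [1, 3]] x = [1, 2] is
# x = (1/11, 7/11), and 25 iterations are more than enough to reach 1e-6.
_coefficients = np.array([[4.0, 1.0], [1.0, 3.0]])
_constants = np.array([[1.0], [2.0]])
_approx = jacobi_iteration_method(_coefficients, _constants, [0.0, 0.0], 25)
assert abs(_approx[0] - 1 / 11) < 1e-6 and abs(_approx[1] - 7 / 11) < 1e-6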
| 576 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
SCREAMING_SNAKE_CASE_ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = XLNetConfig.from_json_file(_lowercase )
UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
UpperCamelCase = finetuning_task
UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
UpperCamelCase = XLNetForSequenceClassification(_lowercase )
elif "squad" in finetuning_task:
UpperCamelCase = finetuning_task
UpperCamelCase = XLNetForQuestionAnswering(_lowercase )
else:
UpperCamelCase = XLNetLMHeadModel(_lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowercase ,_lowercase ,_lowercase )
# Save pytorch-model
UpperCamelCase = os.path.join(_lowercase ,_lowercase )
UpperCamelCase = os.path.join(_lowercase ,_lowercase )
print(f'Save PyTorch model to {os.path.abspath(_lowercase )}' )
torch.save(model.state_dict() ,_lowercase )
print(f'Save configuration file to {os.path.abspath(_lowercase )}' )
with open(_lowercase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 34 | '''simple docstring'''
from numpy import exp, pi, sqrt
def __UpperCAmelCase ( a_: int, a_: float = 0.0, a_: float = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 494 | 0 |
from PIL import Image
def snake_case__ ( UpperCAmelCase : Image , UpperCAmelCase : float ):
def brightness(UpperCAmelCase : int ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
_a : Tuple = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 702 |
def snake_case__ ( UpperCAmelCase : Tuple ):
lowerCAmelCase__ :List[Any] = len(UpperCAmelCase )
for i in range(length - 1 ):
lowerCAmelCase__ :Union[str, Any] = i
for k in range(i + 1 , UpperCAmelCase ):
if collection[k] < collection[least]:
lowerCAmelCase__ :Any = k
if least != i:
lowerCAmelCase__ ,lowerCAmelCase__ :int = (collection[i], collection[least])
return collection
if __name__ == "__main__":
_a : Optional[int] = input("""Enter numbers separated by a comma:\n""").strip()
_a : Any = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
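# Two quick checks using the original name selection_sort (as in the calls
# above). Selection sort performs O(n^2) comparisons but at most n - 1 swaps,
# which is why it is sometimes preferred when writes are expensive.
assert selection_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert selection_sort([]) == []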
| 111 | 0 |
def lowercase ( _lowerCAmelCase ):
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def lowercase ( _lowerCAmelCase ):
UpperCAmelCase__ = credit_card_number
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(_UpperCAmelCase ) - 2
for i in range(_UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
UpperCAmelCase__ = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCAmelCase__ = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def lowercase ( _lowerCAmelCase ):
UpperCAmelCase__ = F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(_UpperCAmelCase ) <= 16:
print(F'''{error_message} of its length.''' )
return False
if not validate_initial_digits(_UpperCAmelCase ):
print(F'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(_UpperCAmelCase ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
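# A worked Luhn check using the original name luhn_validation (as referenced
# above): in 4111111111111111, every second digit from the right is doubled
# (seven 1s become 2, the leading 4 becomes 8), the other eight digits stay 1,
# and the total 8 + 7 * 2 + 8 * 1 = 30 is divisible by 10.
assert luhn_validation("4111111111111111")
assert not luhn_validation("4111111111111112")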
| 392 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
_A = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
_A = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
_A = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_A = field(default=2 , metadata={"help": "Batch size for training."} )
_A = field(default=2 , metadata={"help": "Batch size for evaluation."} )
_A = field(default=0.1 , metadata={"help": "Value of weight decay."} )
_A = field(
default=10000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    _A = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
_A = field(default="cosine" , metadata={"help": "Learning rate."} )
_A = field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
_A = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
_A = field(
default=__snake_case , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
_A = field(default=50000 , metadata={"help": "Maximum number of training steps."} )
_A = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_A = field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
_A = field(default=1 , metadata={"help": "Training seed."} )
_A = field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
_A = field(
default=__snake_case , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
_A = field(default=__snake_case , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_A = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_A = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
_A = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_A = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
_A = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_A = field(default=__snake_case , metadata={"help": "Number of workers used for code evaluation."} )
_A = field(
default=__snake_case , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
_A = field(
default=__snake_case , metadata={"help": "Sample from the language model's output distribution."} )
_A = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
_A = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
_A = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
_A = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
_A = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
_A = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
_A = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
_A = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
_A = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
_A = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __lowercase :
_A = field(
default=__snake_case , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
_A = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
_A = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
_A = field(
default=100000 , metadata={"help": "Number of files to save per JSON output file."} )
_A = field(default="content" , metadata={"help": "Column containing text data to process."} )
_A = field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
_A = field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
_A = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
_A = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
_A = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
_A = field(
default=__snake_case , metadata={"help": "If True, near-duplicate samples are removed."} )
_A = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __lowercase :
_A = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
_A = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
_A = field(default="content" , metadata={"help": "Column containing text data to process."} )
_A = field(default=200000 , metadata={"help": "Number of examples to train tokenizer on."} )
_A = field(
default=32768 , metadata={"help": "Number of examples to train the tokenizer on."} )
_A = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
_A = field(default=__snake_case , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
_A = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
_A = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
_A = field(default=__snake_case , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class __lowercase :
_A = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
_A = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
_A = field(default=__snake_case , metadata={"help": "Push saved tokenizer to the hub."} )
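# A minimal consumption sketch for the dataclasses above: HfArgumentParser turns
# them into a CLI. Parsing an empty argv just materializes the defaults; the
# class names follow the CodeParrot-style restoration above and are otherwise
# assumptions, not something stated in this file.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TokenizerTrainingArguments)
    (tokenizer_args,) = parser.parse_args_into_dataclasses(args=[])
    print(tokenizer_args.base_tokenizer, tokenizer_args.vocab_size)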
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
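# A short, illustrative sketch of ClapProcessor outside the test harness above;
# it reuses the test's checkpoint name and dummy-audio helper, and actually
# downloads the processor files when run. The exact set of returned keys is an
# assumption to verify, not a documented contract.
if __name__ == "__main__":
    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    dummy_audio = floats_list((1, 1000))
    inputs = processor(text=["the sound of a cat"], audios=dummy_audio, return_tensors="np")
    print(sorted(inputs.keys()))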
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
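# A small usage sketch for the two classes above; everything is constructed
# locally from defaults, nothing is downloaded. The class names follow the
# restoration above.
if __name__ == "__main__":
    config = EfficientNetConfig(image_size=224, hidden_dim=1280)
    onnx_config = EfficientNetOnnxConfig(config)
    print(config.num_hidden_layers)         # sum(num_block_repeats) * 4 -> 64
    print(dict(onnx_config.inputs))         # {'pixel_values': {0: 'batch', ...}}
    print(onnx_config.atol_for_validation)  # 1e-05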
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu")
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32)
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02)

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(F'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
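# A minimal sketch of calling the processor defined above on one image/text
# pair. "BridgeTower/bridgetower-base" is assumed to be an available pretrained
# repo; running this downloads its tokenizer and image-processor configs.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    encoding = processor(image, "an all-black test image", return_tensors="pt")
    print(sorted(encoding.keys()))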
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(max_perimeter: int = 10**9) -> int:
    """Sums the perimeters generated by the recurrence, up to max_perimeter."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
    print(f"{solution() = }")
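# A self-check sketch for the recurrence above. Reading the code, it enumerates
# the perimeters 16, 50, 196, 722, ... of almost-equilateral triangles
# (sides a, a, a±1 with integral area); that interpretation is an inference
# from the code, not something stated in the file.
def _first_perimeters(count: int) -> list:
    prev_value, value = 1, 2
    perimeters = []
    for i in range(count):
        prev_value += 2 * value
        value += prev_value
        perimeters.append(2 * value + 2 if i % 2 == 0 else 2 * value - 2)
    return perimeters


assert _first_perimeters(4) == [16, 50, 196, 722]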
import argparse
import copy
def generate_neighbours(path):
    """Builds, for every node, the list of [neighbour, distance] pairs from the file."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedily builds an initial tour, starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generates all two-node swaps of the tour, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
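# Usage notes (illustrative, inferred from the parsing code above): each line of
# the input file is "<node_a> <node_b> <distance>", and generate_first_solution
# reads only the first character of the file as the start node, so
# single-character node labels are assumed. Example file and invocation:
#
#   a b 20
#   a c 18
#   a d 22
#   b c 10
#   b d 25
#   c d 23
#
#   python tabu_search.py -f distances.txt -i 100 -s 5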
from __future__ import annotations
solution = []
def is_safe(board, row, column):
    """Checks whether a queen can be placed at board[row][column]."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Returns the harmonic series ["1", "1/2", ..., "1/n"] for the given n."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
__A = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
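# Two concrete calls for the function above, mirroring its string output format.
assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []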
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
"""simple docstring"""
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"""{price_plus_tax(100, 0.25) = }""")
    print(f"""{price_plus_tax(125.50, 0.05) = }""")
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
"""simple docstring"""
__lowerCAmelCase : Tuple = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_SCREAMING_SNAKE_CASE , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCAmelCase ( a_ = "isbn/0140328726" ) -> dict:
"""simple docstring"""
__A = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
__A = F'''{olid} is not a valid Open Library olid'''
raise ValueError(a_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def UpperCAmelCase ( a_ ) -> dict:
"""simple docstring"""
__A = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
__A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__A = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
__A = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(a_ , a_ ):
__A = ", ".join(a_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
def check_bouncy(n: int) -> bool:
    """Returns True if n is bouncy (its digits are neither increasing nor decreasing)."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Returns the least number for which the proportion of bouncy numbers
    first reaches the given percent."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(9_9)}''')
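# Worked examples for the functions above. The 50% and 90% thresholds (538 and
# 21780) are the figures quoted in the Project Euler 112 statement that this
# solution targets.
assert not check_bouncy(134468)  # digits never decrease -> not bouncy
assert check_bouncy(155349)      # neither increasing nor decreasing -> bouncy
assert solution(50) == 538
assert solution(90) == 21780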
import os
import string
import sys
A : Dict = 1 << 8
A : Dict = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
A : Any = KEYMAP['''up''']
A : Optional[Any] = KEYMAP['''left''']
if sys.platform == "win32":
A : Optional[Any] = []
A : str = {
b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
A : Tuple = ord(str(i))
def get_raw_chars():
    """Gets raw characters from the input stream."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
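# A minimal interactive sketch for the helpers above. It needs a real terminal
# (the POSIX branch puts stdin into raw mode and blocks on a read), so it is
# guarded rather than executed on import.
if __name__ == "__main__":
    print("Press a key (arrow keys are reported separately):")
    key = get_character()
    if isinstance(key, str) and ord(key) >= ARROW_KEY_FLAG:
        print("arrow key with code", ord(key) - ARROW_KEY_FLAG)
    else:
        print("got:", repr(key))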
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Gaussian Error Linear Unit, exact erf form."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """Tanh approximation of the GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: splits the input in two halves across the given axis."""
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse('2.4'):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    'gelu': gelu,
    'gelu_10': gelu_10,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}''')
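# A short sketch exercising the registry above: get_tf_activation simply maps a
# string onto one of the callables collected in ACT2FN.
if __name__ == "__main__":
    x = tf.constant([-1.0, 0.0, 1.0])
    for name in ("gelu", "gelu_new", "quick_gelu", "mish"):
        print(name, get_tf_activation(name)(x).numpy())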
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
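# A quick randomized cross-check of bucket_sort against the built-in sort,
# purely illustrative alongside the asserts above.
if __name__ == "__main__":
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert bucket_sort(data) == sorted(data)
    print("bucket_sort matches sorted() on", len(data), "random integers")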
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
A = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a : Dict = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
a : List[Any] = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
a : int = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
"""simple docstring"""
from itertools import product
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : int ) -> list[int]:
a__ = sides_number
a__ = max_face_number * dice_number
a__ = [0] * (max_total + 1)
a__ = 1
a__ = range(UpperCamelCase , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase , repeat=UpperCamelCase ):
a__ = sum(UpperCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
def __magic_name__ ( ) -> float:
a__ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
a__ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
a__ = 0
a__ = 9
a__ = 4 * 9
a__ = 6
for peter_total in range(UpperCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
a__ = (4**9) * (6**6)
a__ = peter_wins_count / total_games_number
a__ = round(UpperCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
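
# Quick sanity notes (not part of the original solution): with a single
# four-sided die, total_frequency_distribution(4, 1) is [0, 1, 1, 1, 1],
# and the full run should print `solution() = 0.5731441`.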
| 273 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename basic flax keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
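
# Illustrative note (hypothetical shapes): a 3D expert kernel of shape
# (num_experts, d_in, d_out) is permuted to (num_experts, d_out, d_in) so each
# expert matches torch.nn.Linear's (out_features, in_features) weight layout;
# 2D kernels are simply transposed for the same reason.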
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into the real layer name, the remaining
    sub-key, and the content to store under that sub-key."""
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
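
# Hypothetical example: a flattened key such as
# "encoder/layer_0/mlp/wi/kernel/kvstore/path" yields the real layer name
# "encoder/layer_0/mlp/wi/kernel", the sub-key tuple ("kvstore", "path"), and a
# content string pointing into switch_checkpoint_path on disk.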
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # PyTorch state dicts use "." rather than "/" as the key separator
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx + 1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
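
# Back-of-the-envelope sketch (hypothetical numbers): convert_file_size_to_int("10GB")
# is 10 * 1000**3 bytes and dtype_byte_size(torch.bfloat16) is 2, so a bfloat16
# tensor with 5e9 elements fills a 10GB shard on its own, and the next weight
# starts a fresh block.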
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
lowercase : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
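
# Example invocation (paths are placeholders, not part of the original script):
#   python convert_big_switch.py \
#       --switch_t5x_checkpoint_path /path/to/checkpoint_634600 \
#       --pytorch_dump_folder_path /path/to/output \
#       --max_shard_size 10GB --dtype bfloat16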
def sanity_check():
    """Smoke-test the converted checkpoint by generating from it."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 708 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 94 | 0 |
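
# Minimal sketch of what the lazy module buys (assuming transformers is
# installed; the names below are the Swin V2 exports listed above): importing
# the package stays cheap, and torch-backed classes only materialize on first
# attribute access.
#   from transformers.models.swinv2 import Swinv2Config  # resolved lazily
#   config = Swinv2Config()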