import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        """Wraps an in-graph tokenizer and a model so both can be exported together."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # set a pad token so the padding test can run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
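
# A minimal usage sketch (not part of the test suite above): because the
# in-graph tokenizer is a TF callable, it can be mapped over a tf.data
# pipeline, which the pure-Python GPT2Tokenizer cannot. The checkpoint name
# is the one assumed above.
#
# tf_tokenizer = TFGPT2Tokenizer.from_pretrained(TINY_MODEL_CHECKPOINT)
# ds = tf.data.Dataset.from_tensor_slices(["hello world"]).map(tf_tokenizer)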
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Compute whichever of inductance (H), frequency (Hz) or inductive reactance (ohms)
    was passed as 0 from the other two, using X_L = 2 * pi * f * L."""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
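# A quick worked example (illustrative values): a 35 mH inductor driven at
# 1 kHz has X_L = 2 * pi * 1e3 * 35e-3 ≈ 219.911 ohms:
# >>> ind_reactance(35e-3, 1e3, 0)
# {'reactance': 219.91148575128552}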
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( ) -> Any:
'''simple docstring'''
lowercase = 1_0
lowercase = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
lowercase = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(lowerCAmelCase__ ) ),
} , features=lowerCAmelCase__ , )
return dataset
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] ) -> Any:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowerCAmelCase__ )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : Tuple ="""\
Text data.
Second line of data."""
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> str:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
lowercase = FILE_CONTENT
with open(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ )
return filename
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> Dict:
'''simple docstring'''
import bza
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
lowercase = bytes(lowerCAmelCase__ , """utf-8""" )
with bza.open(lowerCAmelCase__ , """wb""" ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> List[str]:
'''simple docstring'''
import gzip
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
lowercase = bytes(lowerCAmelCase__ , """utf-8""" )
with gzip.open(lowerCAmelCase__ , """wb""" ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
lowercase = bytes(lowerCAmelCase__ , """utf-8""" )
with lza.frame.open(lowerCAmelCase__ , """wb""" ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict ) -> int:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(lowerCAmelCase__ , """w""" ) as archive:
archive.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
import tarfile
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowerCAmelCase__ , """w""" ) as f:
f.add(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> List[Any]:
'''simple docstring'''
import lzma
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
lowercase = bytes(lowerCAmelCase__ , """utf-8""" )
with lzma.open(lowerCAmelCase__ , """wb""" ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict ) -> Union[str, Any]:
'''simple docstring'''
import zipfile
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple ) -> Optional[Any]:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
lowercase = bytes(lowerCAmelCase__ , """utf-8""" )
with zstd.open(lowerCAmelCase__ , """wb""" ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> Optional[int]:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
lowercase = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ )
return filename
__lowerCAmelCase : int =[
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
__lowerCAmelCase : str =[
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
__lowerCAmelCase : Optional[Any] ={
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
__lowerCAmelCase : List[Any] =[
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
__lowerCAmelCase : Dict =[
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
lowercase = datasets.Dataset.from_dict(lowerCAmelCase__ )
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] ) -> str:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(lowerCAmelCase__ ) ) as con:
lowercase = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple ) -> str:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowerCAmelCase__ , """w""" , newline="""""" ) as f:
lowercase = csv.DictWriter(lowerCAmelCase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> List[str]:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowerCAmelCase__ , """w""" , newline="""""" ) as f:
lowercase = csv.DictWriter(lowerCAmelCase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] ) -> Any:
'''simple docstring'''
import bza
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowerCAmelCase__ , """rb""" ) as f:
lowercase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCAmelCase__ , """wb""" ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ) -> Any:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
f.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple ) -> int:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowerCAmelCase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] ) -> int:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase__ ) ) )
f.write(lowerCAmelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Any:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
lowercase = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowerCAmelCase__ , """wb""" ) as f:
lowercase = pq.ParquetWriter(lowerCAmelCase__ , schema=lowerCAmelCase__ )
lowercase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCAmelCase__ ) )] for k in DATA[0]} , schema=lowerCAmelCase__ )
writer.write_table(lowerCAmelCase__ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowercase = {"""data""": DATA}
with open(lowerCAmelCase__ , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> int:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowercase = {"""data""": DATA_DICT_OF_LISTS}
with open(lowerCAmelCase__ , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowerCAmelCase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> int:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowerCAmelCase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowerCAmelCase__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> Any:
'''simple docstring'''
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowerCAmelCase__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Dict ) -> List[str]:
'''simple docstring'''
import gzip
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowerCAmelCase__ , """rb""" ) as orig_file:
with gzip.open(lowerCAmelCase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] ) -> List[Any]:
'''simple docstring'''
import gzip
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowerCAmelCase__ , """rb""" ) as orig_file:
with gzip.open(lowerCAmelCase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
f.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] ) -> str:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.join("""nested""" , os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any ) -> str:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase__ ) ) )
f.write(lowerCAmelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] ) -> str:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowerCAmelCase__ , """w""" ) as f:
f.add(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
f.add(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowerCAmelCase__ , """w""" ) as f:
f.add(lowerCAmelCase__ , arcname=os.path.join("""nested""" , os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> Dict:
'''simple docstring'''
lowercase = ["""0""", """1""", """2""", """3"""]
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowerCAmelCase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> Dict:
'''simple docstring'''
lowercase = ["""0""", """1""", """2""", """3"""]
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowerCAmelCase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
lowercase = ["""0""", """1""", """2""", """3"""]
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowerCAmelCase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
f.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase__ ) ) )
f.write(lowerCAmelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] ) -> str:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowerCAmelCase__ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple ) -> List[str]:
'''simple docstring'''
lowercase = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( ) -> Any:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> Optional[int]:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowerCAmelCase__ , """w""" ) as f:
f.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ) )
f.write(lowerCAmelCase__ , arcname=os.path.basename(lowerCAmelCase__ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
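
# Usage sketch (hypothetical test module): pytest injects these session-scoped
# fixtures by parameter name, so a test simply declares the fixture it needs:
#
# def test_csv_loading(csv_path):
#     ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#     assert ds.num_rows == 4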
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 channel-first arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
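
# A minimal usage sketch outside the test harness (the checkpoint name is an
# assumption; any released BLIP checkpoint should work the same way):
#
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photography of", return_tensors="pt")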
from __future__ import annotations

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is given as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run a BFS from the source vertex, recording each node's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk the parent pointers back to the source, building an 'A->B->C' string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
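
# With the demo graph above and source "G", BFS records the parent chain
# D -> B -> A -> C -> G, so the first call below prints "G->C->A->B->D", the
# second prints "G", and the last raises ValueError because "Foo" is not in
# the graph and was never reached.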
if __name__ == "__main__":
a__ : Tuple = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
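
# A minimal usage sketch (assumes PIL and numpy are installed; the relative
# imports above mean this module lives inside the transformers package):
#
# processor = CLIPImageProcessor()
# image = PIL.Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
# batch = processor(images=image, return_tensors="np")
# batch["pixel_values"].shape  # (1, 3, 224, 224) with the default size/crop settings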
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
main()
"""simple docstring"""
def snake_case ( ):
return [list(range(10_00 - i ,-10_00 - i ,-1 ) ) for i in range(10_00 )]
lowerCamelCase_ = generate_large_matrix()
lowerCamelCase_ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def snake_case ( A__ ):
assert all(row == sorted(UpperCAmelCase__ ,reverse=UpperCAmelCase__ ) for row in grid )
assert all(list(UpperCAmelCase__ ) == sorted(UpperCAmelCase__ ,reverse=UpperCAmelCase__ ) for col in zip(*UpperCAmelCase__ ) )
def snake_case ( A__ ):
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Tuple = len(UpperCAmelCase__ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCAmelCase_ : Optional[int] = (left + right) // 2
UpperCAmelCase_ : Any = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCAmelCase_ : int = mid + 1
else:
UpperCAmelCase_ : List[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(UpperCAmelCase__ )
def snake_case ( A__ ):
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : int = len(grid[0] )
for i in range(len(UpperCAmelCase__ ) ):
UpperCAmelCase_ : Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(UpperCAmelCase__ ) * len(grid[0] )) - total
def snake_case ( A__ ):
return len([number for row in grid for number in row if number < 0] )
def snake_case ( A__ ):
UpperCAmelCase_ : Optional[Any] = 0
for row in grid:
for i, number in enumerate(UpperCAmelCase__ ):
if number < 0:
total += len(UpperCAmelCase__ ) - i
break
return total
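
# A worked example on the first test grid: the rows contribute 1, 1, 2 and 4
# negative numbers respectively, so all three implementations return 8:
# >>> count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
# 8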
def benchmark() -> None:
    """Benchmark the three implementations against each other on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
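
# Quick sanity check of the property above: the default conv_stride
# (5, 2, 2, 2, 2, 2, 2) downsamples the waveform by 5 * 2**6 = 320 samples per
# output frame, so UniSpeechConfig().inputs_to_logits_ratio == 320.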
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back into a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
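
# Example round trip (the Ascii85 encoding of "Hello World!" is b'87cURD]i,"Ebo80'):
# >>> base85_encode("Hello World!")
# b'87cURD]i,"Ebo80'
# >>> base85_decode(b'87cURD]i,"Ebo80')
# 'Hello World!'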
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase_ : Any = sys.version_info >= (3, 10)
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str=None , __magic_name__ : Any=None ) -> Any:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=__magic_name__ )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : int
snake_case__ : float
snake_case__ : str
snake_case__ : bool
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : int = 4_2
snake_case__ : str = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : bool = False
snake_case__ : bool = True
snake_case__ : Optional[bool] = None
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Optional[Any] = """titi"""
snake_case__ : Optional[Any] = """toto"""
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : str = """titi"""
snake_case__ : Tuple = """toto"""
snake_case__ : Tuple = 4_2
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : BasicEnum = "toto"
def _A ( self : str ):
UpperCamelCase :List[Any] = BasicEnum(self.foo )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : MixedTypeEnum = "toto"
def _A ( self : str ):
UpperCamelCase :List[str] = MixedTypeEnum(self.foo )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : Optional[int] = None
snake_case__ : Optional[float] = field(default=_a , metadata={"""help""": """help message"""} )
snake_case__ : Optional[str] = None
snake_case__ : Optional[List[str]] = list_field(default=[] )
snake_case__ : Optional[List[int]] = list_field(default=[] )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : List[int] = list_field(default=[] )
snake_case__ : List[int] = list_field(default=[1, 2, 3] )
snake_case__ : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
snake_case__ : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : List[int] = field()
snake_case__ : str = field()
snake_case__ : BasicEnum = field()
def _A ( self : Dict ):
UpperCamelCase :List[str] = BasicEnum(self.required_enum )
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : int
snake_case__ : "BasicEnum" = field()
snake_case__ : "Optional[bool]" = None
snake_case__ : "str" = field(default="""toto""" , metadata={"""help""": """help message"""} )
snake_case__ : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : bool = False
snake_case__ : bool = True
snake_case__ : bool | None = None
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : int | None = None
snake_case__ : float | None = field(default=_a , metadata={"""help""": """help message"""} )
snake_case__ : str | None = None
snake_case__ : list[str] | None = list_field(default=[] )
snake_case__ : list[int] | None = list_field(default=[] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : Dict , __lowerCamelCase : argparse.ArgumentParser , __lowerCamelCase : argparse.ArgumentParser ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCamelCase :List[Any] = {k: v for k, v in vars(__lowerCamelCase ).items() if k != """container"""}
UpperCamelCase :Union[str, Any] = {k: v for k, v in vars(__lowerCamelCase ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , __lowerCamelCase ) and yy.get("""choices""" , __lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](__lowerCamelCase ) , yy["""type"""](__lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[Any] ):
UpperCamelCase :List[Any] = HfArgumentParser(__lowerCamelCase )
UpperCamelCase :List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__lowerCamelCase , required=__lowerCamelCase )
expected.add_argument("""--bar""" , type=__lowerCamelCase , required=__lowerCamelCase )
expected.add_argument("""--baz""" , type=__lowerCamelCase , required=__lowerCamelCase )
expected.add_argument("""--flag""" , type=__lowerCamelCase , default=__lowerCamelCase , const=__lowerCamelCase , nargs="""?""" )
self.argparsersEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :str = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((UpperCamelCase) , ) :List[Any] = parser.parse_args_into_dataclasses(__lowerCamelCase , look_for_args_file=__lowerCamelCase )
self.assertFalse(example.flag )
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)

            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)

            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
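
# A minimal sketch (not part of the original test file) of the behavior that
# `make_choice_type_function` is assumed to have: argparse applies a single
# `type` callable to every value, so mixed str/int choices need a converter
# that maps the string form of each choice back to the original object.
def _sketch_make_choice_type_function(choices):
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


_convert = _sketch_make_choice_type_function(["titi", "toto", 42])
assert _convert("titi") == "titi"
assert _convert("42") == 42  # the string "42" is converted back to the int choice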
| 590 | 1 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left:right + 1], using divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
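
# A quick usage sketch (values are illustrative): the recursion halves the
# search range until single elements remain, so the call depth is O(log n).
if __name__ == "__main__":
    assert find_max([3, 7, 1, 9, 4], 0, 4) == 9
    assert find_max([-5, -2, -9], 0, 2) == -2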
| 583 |
snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowerCamelCase_ ( ):
lowercase : Optional[Any] = input('''Enter message: ''' )
lowercase : Optional[Any] = input('''Enter key [alphanumeric]: ''' )
lowercase : Union[str, Any] = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowercase : str = '''encrypt'''
lowercase : Optional[Any] = encrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
elif mode.lower().startswith('''d''' ):
lowercase : str = '''decrypt'''
lowercase : Optional[int] = decrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
print(f'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , '''encrypt''' )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , '''decrypt''' )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
lowercase : Optional[Any] = []
lowercase : Tuple = 0
lowercase : str = key.upper()
for symbol in message:
lowercase : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase_ ):
lowercase : List[str] = 0
else:
translated.append(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 583 | 1 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
lowerCAmelCase : Dict =parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
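    # Example invocation (paths and script name are illustrative):
    #   python convert_gpt2_checkpoint.py \
    #       --gpt2_checkpoint_path /path/to/tf_checkpoint \
    #       --pytorch_dump_folder_path /path/to/output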
| 15 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

MAX_MODEL_INPUT_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, loading its vocabulary from a json file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
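
# Usage sketch (checkpoint id taken from the vocab map above; network access required):
#
#     tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#     tokenizer._tokenize("abc")  # character-level tokens: ["a", "b", "c"]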
| 15 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    """Wraps a BLIP image processor and a language-model tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
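
# Usage sketch (model id is illustrative):
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")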
| 40 |
"""Fast tokenization class for LXMERT."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
UpperCAmelCase_ : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ : Union[str, Any] = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_ : str = {
"unc-nlp/lxmert-base-uncased": 512,
}
UpperCAmelCase_ : Optional[int] = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" LXMERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
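
# Sketch of the sequence layouts produced by the two methods above
# (A/B stand for arbitrary token sequences):
#   single sequence: [CLS] A [SEP]         -> token_type_ids: 0 ... 0
#   sequence pair:   [CLS] A [SEP] B [SEP] -> token_type_ids: 0 ... 0 1 ... 1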
| 120 | 0 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available."
                " Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined."
                " Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
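
# Usage sketch (checkpoint id is illustrative of the released weights):
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     output = pipe(processed_midi_tokens, num_inference_steps=100)
#     audio = output.audios[0]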
| 721 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
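
# Usage sketch (arguments are illustrative):
#
#     dataset = SquadDataset(data_args, tokenizer, mode="train")
#     batch = dataset[0]  # dict of tensors: input_ids, attention_mask, ...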
| 397 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"

        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return

        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
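
# Usage sketch for the widgets above (loop bounds are illustrative; requires
# an IPython display frontend such as Jupyter):
#
#     pbar = NotebookProgressBar(100)
#     for step in range(100):
#         ...  # do some work
#         pbar.update(step + 1, comment=f"step {step}")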
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
| 16 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Any )-> Any:
"""simple docstring"""
UpperCamelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
UpperCamelCase = c.n_embd + 1 # int
UpperCamelCase = c.resid_pdrop + 1.0 # float
UpperCamelCase = not c.scale_attn_weights # bool
UpperCamelCase = c.summary_type + "foo" # str
c.update_from_string(
f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
self.assertEqual(UpperCAmelCase_ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(UpperCAmelCase_ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(UpperCAmelCase_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(UpperCAmelCase_ , c.summary_type , "mismatch for key: summary_type" )
def _SCREAMING_SNAKE_CASE ( self : Any )-> List[str]:
"""simple docstring"""
UpperCamelCase = PretrainedConfig()
UpperCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
UpperCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase_ , UpperCAmelCase_ )]
if len(UpperCAmelCase_ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f" {', '.join(UpperCAmelCase_ )}." )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
with self.assertRaises(UpperCAmelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any )-> Any:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=UpperCAmelCase_ ) as mock_head:
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> int:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = AutoConfig.from_pretrained("bert-base-cased" )
UpperCamelCase = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase_ )
UpperCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
UpperCamelCase = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
UpperCamelCase = ["config.42.0.0.json"]
UpperCamelCase = 768
configuration.save_pretrained(UpperCAmelCase_ )
shutil.move(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , os.path.join(UpperCAmelCase_ , "config.42.0.0.json" ) )
UpperCamelCase = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 768 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Dict:
"""simple docstring"""
        # This repo has two configuration files; the one for v4.0.0 and above has a different hidden size.
UpperCamelCase = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
UpperCamelCase = "v4.0.0"
UpperCamelCase , UpperCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase_ , return_unused_kwargs=UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase_ , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
UpperCamelCase = "v3.0.0"
UpperCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(old_configuration.hidden_size , 768 )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = BlipImageProcessor()
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase , __lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self : Dict , **__lowerCamelCase : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer
def _snake_case ( self : List[Any] , **__lowerCamelCase : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def _snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : Tuple ):
        SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" )
SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : int = StableDiffusionInpaintPipeline
UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase : str = frozenset([] )
def __snake_case ( self ) -> Any:
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A_ , )
lowerCAmelCase = PNDMScheduler(skip_prk_steps=A_ )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCAmelCase = CLIPTextModel(A_ )
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __snake_case ( self , A_ , A_=0 ) -> Tuple:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase = Image.fromarray(np.uint8(A_ ) ).convert("""RGB""" ).resize((64, 64) )
        lowerCAmelCase = Image.fromarray(np.uint8(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(A_ ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(A_ )
else:
lowerCAmelCase = torch.Generator(device=A_ ).manual_seed(A_ )
lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInpaintPipeline(**A_ )
lowerCAmelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCAmelCase = self.get_dummy_inputs(A_ )
lowerCAmelCase = sd_pipe(**A_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __snake_case ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ) -> int:
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
lowerCAmelCase = """stabilityai/stable-diffusion-2-inpainting"""
lowerCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(A_ , safety_checker=A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
lowerCAmelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=A_ , image=A_ , mask_image=A_ , generator=A_ , output_type="""np""" , )
lowerCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
lowerCAmelCase = """stabilityai/stable-diffusion-2-inpainting"""
lowerCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
            A_ , torch_dtype=torch.float16 , safety_checker=A_ , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
lowerCAmelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=A_ , image=A_ , mask_image=A_ , generator=A_ , output_type="""np""" , )
lowerCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __snake_case ( self ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
lowerCAmelCase = """stabilityai/stable-diffusion-2-inpainting"""
lowerCAmelCase = PNDMScheduler.from_pretrained(A_ , subfolder="""scheduler""" )
lowerCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
            A_ , safety_checker=A_ , scheduler=A_ , torch_dtype=torch.float16 , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=A_ , image=A_ , mask_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase = None
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
UpperCAmelCase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Optional[int] = ["input_ids", "attention_mask"]
UpperCAmelCase : List[str] = NllbTokenizer
UpperCAmelCase : List[int] = []
UpperCAmelCase : List[int] = []
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=None , A_=None , A_=None , A_=False , **A_ , ) -> Union[str, Any]:
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
lowerCAmelCase = legacy_behaviour
super().__init__(
vocab_file=A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , src_lang=A_ , tgt_lang=A_ , additional_special_tokens=A_ , legacy_behaviour=A_ , **A_ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = False if not self.vocab_file else True
lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
lowerCAmelCase = {
lang_code: self.convert_tokens_to_ids(A_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn"""
lowerCAmelCase = self.convert_tokens_to_ids(self._src_lang )
lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __snake_case ( self ) -> str:
return self._src_lang
@src_lang.setter
def __snake_case ( self , A_ ) -> None:
lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __snake_case ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __snake_case ( self , A_ , A_ = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case ( self , A_ , A_ , A_ , A_ , **A_ ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
lowerCAmelCase = src_lang
lowerCAmelCase = self(A_ , add_special_tokens=A_ , return_tensors=A_ , **A_ )
lowerCAmelCase = self.convert_tokens_to_ids(A_ )
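        # In the reference NLLB implementation this id is stored on the encoded inputs as
        # `forced_bos_token_id`, so generation starts in the requested target language.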
lowerCAmelCase = tgt_lang_id
return inputs
def __snake_case ( self , A_ , A_ = "eng_Latn" , A_ = None , A_ = "fra_Latn" , **A_ , ) -> BatchEncoding:
lowerCAmelCase = src_lang
lowerCAmelCase = tgt_lang
        return super().prepare_seq2seq_batch(A_ , A_ , **A_ )
def __snake_case ( self ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def __snake_case ( self ) -> Union[str, Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __snake_case ( self , A_ ) -> None:
lowerCAmelCase = self.convert_tokens_to_ids(A_ )
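        # Legacy behaviour keeps the language code after EOS at the end of the sequence;
        # the current behaviour prepends the language code and closes the sequence with EOS.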
if self.legacy_behaviour:
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase = [self.cur_lang_code]
lowerCAmelCase = [self.eos_token_id]
lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __snake_case ( self , A_ ) -> None:
lowerCAmelCase = self.convert_tokens_to_ids(A_ )
if self.legacy_behaviour:
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase = [self.cur_lang_code]
lowerCAmelCase = [self.eos_token_id]
lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __snake_case ( self , A_ , A_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCAmelCase = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
        return (out_vocab_file,)
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : List[str] = SMALL_MODEL_IDENTIFIER
__snake_case : str = 'pt'
__snake_case : int = 'tf'
def A_ ( self : str , __a : Any ) -> Tuple:
'''simple docstring'''
__snake_case : str = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__a )
def A_ ( self : List[Any] , __a : List[Any] ) -> str:
'''simple docstring'''
__snake_case : Any = TFAutoModel.from_pretrained(self.test_model , from_pt=__a )
model_tf.save_pretrained(__a )
def A_ ( self : Any ) -> Any:
'''simple docstring'''
__snake_case : Any = 'mock_framework'
# Framework provided - return whatever the user provides
__snake_case : int = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : Dict = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def A_ ( self : int ) -> List[str]:
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : Any = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : List[str] = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
__snake_case : List[str] = FeaturesManager.determine_framework(__a )
def A_ ( self : List[str] ) -> Any:
'''simple docstring'''
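        # TensorFlow not in environment -> use PyTorch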
__snake_case : Union[str, Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ):
__snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__snake_case : int = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_torch_available' , __a ):
__snake_case : List[str] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
__snake_case : Tuple = MagicMock(return_value=__a )
__snake_case : Optional[Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Optional[int] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
__snake_case : int = MagicMock(return_value=__a )
__snake_case : Optional[Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
with self.assertRaises(__a ):
__snake_case : Any = FeaturesManager.determine_framework(self.test_model )
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class snake_case__ :
def __init__( self : str , __a : Dict , __a : Optional[Any]=13 , __a : Tuple=7 , __a : Any=True , __a : int=True , __a : List[str]=True , __a : Any=True , __a : Optional[Any]=99 , __a : Any=64 , __a : str=5 , __a : List[Any]=4 , __a : Optional[Any]=37 , __a : List[Any]="gelu" , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[str]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Tuple=0.0_2 , __a : List[Any]=3 , __a : Dict=4 , __a : Dict=None , ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = parent
__snake_case : Union[str, Any] = batch_size
__snake_case : Tuple = seq_length
__snake_case : Union[str, Any] = is_training
__snake_case : Dict = use_input_mask
__snake_case : Dict = use_token_type_ids
__snake_case : Any = use_labels
__snake_case : Optional[int] = vocab_size
__snake_case : int = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : int = intermediate_size
__snake_case : Any = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : str = type_sequence_label_size
__snake_case : Optional[int] = initializer_range
__snake_case : Optional[int] = num_labels
__snake_case : Dict = num_choices
__snake_case : int = scope
__snake_case : List[str] = vocab_size - 1
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Union[str, Any] = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def A_ ( self : Dict ) -> int:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def A_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Dict = self.prepare_config_and_inputs()
__snake_case : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def A_ ( self : Union[str, Any] , __a : Optional[int] , __a : Any , __a : int ) -> Any:
'''simple docstring'''
__snake_case : int = GPTNeoXModel(config=__a )
model.to(__a )
model.eval()
__snake_case : List[Any] = model(__a , attention_mask=__a )
__snake_case : List[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Optional[Any] , __a : Union[str, Any] , __a : List[str] , __a : int ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[Any] = True
__snake_case : Tuple = GPTNeoXModel(__a )
model.to(__a )
model.eval()
__snake_case : Optional[int] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Dict , __a : Optional[int] , __a : Dict , __a : Optional[Any] , __a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[int] = GPTNeoXForCausalLM(config=__a )
model.to(__a )
model.eval()
__snake_case : str = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Union[str, Any] , __a : List[str] , __a : Optional[int] , __a : Optional[int] , __a : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[Any] = self.num_labels
__snake_case : List[str] = GPTNeoXForQuestionAnswering(__a )
model.to(__a )
model.eval()
__snake_case : List[Any] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Any , __a : Optional[int] , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Any = GPTNeoXForSequenceClassification(__a )
model.to(__a )
model.eval()
__snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Tuple = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : List[Any] , __a : str , __a : List[str] , __a : Dict , __a : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[Any] = self.num_labels
__snake_case : List[Any] = GPTNeoXForTokenClassification(__a )
model.to(__a )
model.eval()
__snake_case : Dict = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : int , __a : List[str] , __a : str , __a : Union[str, Any] ) -> Dict:
'''simple docstring'''
__snake_case : Any = True
__snake_case : List[Any] = GPTNeoXForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
__snake_case : Dict = model(__a , attention_mask=__a , use_cache=__a )
__snake_case : Dict = outputs.past_key_values
        # create hypothetical next tokens and extend them onto next_input_ids
__snake_case : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
__snake_case : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
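        # the run below re-encodes the full extended sequence from scratch; the cached run
        # afterwards only receives the new tokens plus past_key_values, and both must agree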
__snake_case : Tuple = model(__a , attention_mask=__a , output_hidden_states=__a )
__snake_case : str = output_from_no_past['hidden_states'][0]
__snake_case : Optional[int] = model(
__a , attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )['hidden_states'][0]
# select random slice
__snake_case : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : int = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : Dict = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs
__snake_case : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
A__ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
A__ = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = GPTNeoXModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=__a , hidden_size=64 , num_attention_heads=8 )
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self : str ) -> int:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a , __a , __a )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
# This regression test was failing with PyTorch < 1.3
__snake_case , __snake_case , __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
__snake_case : Dict = None
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__a , __a , __a )
def A_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__a )
def A_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def A_ ( self : List[str] ) -> int:
'''simple docstring'''
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def A_ ( self : str ) -> Any:
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def A_ ( self : Union[str, Any] , __a : Tuple ) -> List[Any]:
'''simple docstring'''
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] = ids_tensor([1, 10] , config.vocab_size )
__snake_case : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
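        # the short input fits inside the trained context window; the long input is 1.5x
        # max_position_embeddings, so the scaled RoPE has to extrapolate beyond it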
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : List[str] = GPTNeoXModel(__a )
original_model.to(__a )
original_model.eval()
__snake_case : Optional[Any] = original_model(__a ).last_hidden_state
__snake_case : str = original_model(__a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
            __snake_case : int = {'type': scaling_type, 'factor': 10.0}
__snake_case : Optional[Any] = GPTNeoXModel(__a )
scaled_model.to(__a )
scaled_model.eval()
__snake_case : Union[str, Any] = scaled_model(__a ).last_hidden_state
__snake_case : Any = scaled_model(__a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__a , __a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__a , __a , atol=1e-5 ) )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def A_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
__snake_case : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__a )
__snake_case : List[str] = tokenizer('My favorite food is' , return_tensors='pt' ).to(__a )
            # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__snake_case : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
__snake_case : List[Any] = model.generate(**__a , do_sample=__a , max_new_tokens=20 )
__snake_case : str = tokenizer.batch_decode(__a )[0]
self.assertEqual(__a , __a )
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCAmelCase ( lowercase__ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , ) -> Dict:
"""simple docstring"""
if audio_length_in_s is None:
UpperCamelCase = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCamelCase = audio_length_in_s * self.unet.config.sample_rate
UpperCamelCase = 2 ** len(self.unet.up_blocks )
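        # each up/down block changes the temporal resolution by a factor of 2, so the
        # generated sample length must be a multiple of 2 ** len(up_blocks)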
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
UpperCamelCase = int(SCREAMING_SNAKE_CASE )
if sample_size % down_scale_factor != 0:
UpperCamelCase = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
" process." )
UpperCamelCase = int(SCREAMING_SNAKE_CASE )
UpperCamelCase = next(iter(self.unet.parameters() ) ).dtype
UpperCamelCase = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=SCREAMING_SNAKE_CASE )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE , device=audio.device )
UpperCamelCase = self.scheduler.timesteps.to(SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
            # 2. compute previous sample: x_t -> x_t-1
UpperCamelCase = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCamelCase = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=SCREAMING_SNAKE_CASE )
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
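    # Replace the `done_test[_id]`-th statement whose first token matches `correct_line`
    # inside `class_name.test_name` of `file`, keeping the original indentation. A matched
    # multi-line statement is consumed up to its closing parenthesis before the replacement
    # line is written in its place.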
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def lowercase ( __magic_name__ , __magic_name__=None ):
'''simple docstring'''
if fail is not None:
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Optional[int] = {l.strip() for l in f.readlines()}
else:
UpperCAmelCase : Any = None
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : int = defaultdict(__magic_name__ )
for line in correct_lines:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
a : List[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
'''simple docstring'''
import argparse
lowerCAmelCase_ = '''docs/source/_static/js/custom.js'''
def A__ ( A : Dict):
'''simple docstring'''
with open(_UpperCAmelCase , encoding="utf-8" , newline="\n") as f:
UpperCamelCase : Dict = f.readlines()
UpperCamelCase : str = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion ="):
index += 1
UpperCamelCase : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {"):
index += 1
# We go until the end
while not lines[index].startswith("}"):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(_UpperCAmelCase , "w" , encoding="utf-8" , newline="\n") as f:
f.writelines(_UpperCAmelCase)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
lowerCAmelCase_ = parser.parse_args()
update_custom_js(args.version)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''timesformer'''
def __init__( self , lowerCamelCase=2_24 , lowerCamelCase=16 , lowerCamelCase=3 , lowerCamelCase=8 , lowerCamelCase=7_68 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=30_72 , lowerCamelCase="gelu" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1e-6 , lowerCamelCase=True , lowerCamelCase="divided_space_time" , lowerCamelCase=0 , **lowerCamelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase )
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Optional[Any] = patch_size
UpperCamelCase : Dict = num_channels
UpperCamelCase : int = num_frames
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : int = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Optional[Any] = intermediate_size
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : List[str] = attention_probs_dropout_prob
UpperCamelCase : Tuple = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : Any = qkv_bias
UpperCamelCase : int = attention_type
UpperCamelCase : int = drop_path_rate
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase_ : str , lowercase_ : Any ):
"""simple docstring"""
a_ = b.T
a_ = np.sum(np.square(__a ) , axis=1 )
a_ = np.sum(np.square(__a ) , axis=0 )
a_ = np.matmul(__a , __a )
a_ = aa[:, None] - 2 * ab + ba[None, :]
return d
def __UpperCamelCase ( lowercase_ : List[str] , lowercase_ : str ):
"""simple docstring"""
a_ = x.reshape(-1 , 3 )
a_ = squared_euclidean_distance(__a , __a )
return np.argmin(__a , axis=1 )
class __SCREAMING_SNAKE_CASE (lowercase__ ):
"""simple docstring"""
_a : Any = ['''pixel_values''']
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BILINEAR , UpperCamelCase__ = True , UpperCamelCase__ = True , **UpperCamelCase__ , ):
"""simple docstring"""
super().__init__(**_a )
a_ = size if size is not None else {'height': 256, 'width': 256}
a_ = get_size_dict(_a )
a_ = np.array(_a ) if clusters is not None else None
a_ = do_resize
a_ = size
a_ = resample
a_ = do_normalize
a_ = do_color_quantize
def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = PILImageResampling.BILINEAR , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
a_ = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
_a , size=(size['height'], size['width']) , resample=_a , data_format=_a , **_a )
def _a ( self , UpperCamelCase__ , UpperCamelCase__ = None , ):
"""simple docstring"""
a_ = rescale(image=_a , scale=1 / 127.5 , data_format=_a )
a_ = image - 1
return image
def _a ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ):
"""simple docstring"""
a_ = do_resize if do_resize is not None else self.do_resize
a_ = size if size is not None else self.size
a_ = get_size_dict(_a )
a_ = resample if resample is not None else self.resample
a_ = do_normalize if do_normalize is not None else self.do_normalize
a_ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
a_ = clusters if clusters is not None else self.clusters
a_ = np.array(_a )
a_ = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
a_ = [to_numpy_array(_a ) for image in images]
if do_resize:
a_ = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_normalize:
a_ = [self.normalize(image=_a ) for image in images]
if do_color_quantize:
a_ = [to_channel_dimension_format(_a , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
a_ = np.array(_a )
a_ = color_quantize(_a , _a ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
a_ = images.shape[0]
a_ = images.reshape(_a , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
a_ = list(_a )
else:
a_ = [to_channel_dimension_format(_a , _a ) for image in images]
a_ = {'input_ids': images}
return BatchFeature(data=_a , tensor_type=_a )
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = '''gpt_neo'''
__UpperCAmelCase : Optional[int] = ['''past_key_values''']
__UpperCAmelCase : Optional[int] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[Any] ,_a : Optional[int]=5_0257 ,_a : Tuple=2048 ,_a : Optional[int]=2048 ,_a : Any=24 ,_a : Tuple=[[["global", "local"], 12]] ,_a : Union[str, Any]=16 ,_a : List[Any]=None ,_a : Optional[int]=256 ,_a : Optional[Any]="gelu_new" ,_a : List[Any]=0.0 ,_a : Optional[int]=0.0 ,_a : List[Any]=0.0 ,_a : Union[str, Any]=0.1 ,_a : Optional[Any]=1E-5 ,_a : Optional[Any]=0.02 ,_a : str=True ,_a : Any=5_0256 ,_a : Tuple=5_0256 ,**_a : List[str] ,):
'''simple docstring'''
_a : Dict = vocab_size
_a : Union[str, Any] = max_position_embeddings
_a : List[str] = hidden_size
_a : Optional[Any] = num_layers
_a : Optional[Any] = num_heads
_a : Dict = intermediate_size
_a : Any = window_size
_a : List[str] = activation_function
_a : int = resid_dropout
_a : Tuple = embed_dropout
_a : int = attention_dropout
_a : Dict = classifier_dropout
_a : Tuple = layer_norm_epsilon
_a : List[str] = initializer_range
_a : str = use_cache
_a : List[str] = bos_token_id
_a : Optional[Any] = eos_token_id
_a : Tuple = attention_types
_a : Union[str, Any] = self.expand_attention_types_params(_a )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
F"""`config.num_layers = {self.num_layers}`. """
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
@staticmethod
def __lowercase ( _a : Dict ):
'''simple docstring'''
_a : Dict = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def UpperCAmelCase_ (__a : str , __a : Optional[int] , __a : Tuple , __a : Dict ):
"""simple docstring"""
import torch
_a : Tuple = input.size()
_a : Union[str, Any] = len(__a )
_a : Union[str, Any] = shape[dimension]
_a : str = torch.arange(0 , __a , __a )
_a : Optional[Any] = torch.div(sizedim - size , __a , rounding_mode='floor' ) + 1
_a : str = torch.arange(__a ) + low_indices[:min_length][:, None]
_a : Optional[Any] = [slice(__a )] * rank
_a : Dict = indices
_a : List[str] = input[s]
_a : Optional[int] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__a )
def UpperCAmelCase_ (__a : str , __a : Optional[int] ):
"""simple docstring"""
import torch
_a : List[str] = torch.arange(1 , __a )
_a : int = torch.remainder(__a , __a )
_a : Tuple = remainders == 0
_a : Optional[Any] = candidates[divisor_indices]
_a : List[Any] = torch.max(__a )
return largest_divisor, torch.div(__a , __a , rounding_mode='floor' )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
@property
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_a ,direction='inputs' )
_a : Optional[int] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_a : List[str] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return self._config.num_heads
def __lowercase ( self : Any ,_a : PreTrainedTokenizer ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional[TensorType] = None ,):
'''simple docstring'''
_a : Dict = super(_a ,self ).generate_dummy_inputs(
_a ,batch_size=_a ,seq_length=_a ,is_pair=_a ,framework=_a )
        # We need to order the inputs in the way they appear in the forward()
_a : Union[str, Any] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_a, _a : Dict = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_a : Any = seqlen + 2
_a : str = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a : Tuple = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(self.num_layers )
]
_a : List[str] = common_inputs['attention_mask']
if self.use_past:
_a : Optional[int] = ordered_inputs['attention_mask'].dtype
_a : Optional[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_a ,_a ,dtype=_a )] ,dim=1 )
return ordered_inputs
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return 13
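# Worked example of the static expansion above: each [layer_kinds, repeat] pair
# in `attention_types` is unrolled `repeat` times, yielding one attention kind
# per decoder layer (matching the default num_layers=24 in __init__).
attention_types = [[["global", "local"], 12]]
attentions = []
for item in attention_types:
    for _ in range(item[1]):
        attentions.extend(item[0])
assert attentions == ["global", "local"] * 12  # 24 entries, alternating kinds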
| 229 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowercase__ =TypeVar('T')
lowercase__ =TypeVar('U')
class a_ ( Generic[T, U] ):
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
a_ = key
a_ = val
a_ = None
a_ = None
def __repr__( self ):
return (
f'''Node: key: {self.key}, val: {self.val}, '''
f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class a_ ( Generic[T, U] ):
def __init__( self ):
a_ = DoubleLinkedListNode(UpperCAmelCase , UpperCAmelCase )
a_ = DoubleLinkedListNode(UpperCAmelCase , UpperCAmelCase )
a_ , a_ = self.rear, self.head
def __repr__( self ):
a_ = ["""DoubleLinkedList"""]
a_ = self.head
while node.next is not None:
rep.append(str(UpperCAmelCase ) )
a_ = node.next
rep.append(str(self.rear ) )
return ",\n ".join(UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
a_ = node
a_ = previous
a_ = node
a_ = self.rear
def lowerCAmelCase__ ( self , UpperCAmelCase ):
if node.prev is None or node.next is None:
return None
a_ = node.next
a_ = node.prev
a_ = None
a_ = None
return node
class a_ ( Generic[T, U] ):
lowerCamelCase__ : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , UpperCAmelCase ):
a_ = DoubleLinkedList()
a_ = capacity
a_ = 0
a_ = 0
a_ = 0
a_ = {}
def __repr__( self ):
return (
f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
f'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , UpperCAmelCase ):
return key in self.cache
def lowerCAmelCase__ ( self , UpperCAmelCase ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
a_ = self.cache[key]
a_ = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(UpperCAmelCase )
return node.val
self.miss += 1
return None
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
a_ = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(UpperCAmelCase ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
a_ = DoubleLinkedListNode(UpperCAmelCase , UpperCAmelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
a_ = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
a_ = value
self.list.add(UpperCAmelCase )
@classmethod
def lowerCAmelCase__ ( cls , UpperCAmelCase = 1_28 ):
def cache_decorator_inner(UpperCAmelCase ) -> Callable[..., U]:
def cache_decorator_wrapper(*UpperCAmelCase ) -> U:
if func not in cls.decorator_function_to_instance_map:
a_ = LRUCache(UpperCAmelCase )
a_ = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
a_ = func(*UpperCAmelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , UpperCAmelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(UpperCAmelCase , """cache_info""" , UpperCAmelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
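# Usage sketch for the decorator interface above. In the upstream source the
# classmethod is named `decorator` (the name is mangled in this dump) and the
# cache keys on the first positional argument only, so it suits unary functions.
@LRUCache.decorator(32)
def fib(num: int) -> int:
    return num if num < 2 else fib(num - 1) + fib(num - 2)

fib(20)
print(fib.cache_info())  # CacheInfo(hits=18, misses=21, capacity=32, current size=21)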
| 511 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
        ),
    },
    'tokenizer_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
        'roberta-base-openai-detector': (
            'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
        ),
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
        ),
},
}
lowercase__ ={
'roberta-base': 5_12,
'roberta-large': 5_12,
'roberta-large-mnli': 5_12,
'distilroberta-base': 5_12,
'roberta-base-openai-detector': 5_12,
'roberta-large-openai-detector': 5_12,
}
class a_ ( UpperCamelCase__ ):
lowerCamelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Tuple = ['input_ids', 'attention_mask']
lowerCamelCase__ : Any = RobertaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ):
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
a_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase ) != add_prefix_space:
a_ = getattr(UpperCAmelCase , pre_tok_state.pop("""type""" ) )
a_ = add_prefix_space
a_ = pre_tok_class(**UpperCAmelCase )
a_ = add_prefix_space
a_ = """post_processor"""
a_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
a_ = json.loads(tokenizer_component_instance.__getstate__() )
        # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a_ = tuple(state["""sep"""] )
if "cls" in state:
a_ = tuple(state["""cls"""] )
a_ = False
if state.get("""add_prefix_space""" , UpperCAmelCase ) != add_prefix_space:
a_ = add_prefix_space
a_ = True
if state.get("""trim_offsets""" , UpperCAmelCase ) != trim_offsets:
a_ = trim_offsets
a_ = True
if changes_to_apply:
a_ = getattr(UpperCAmelCase , state.pop("""type""" ) )
a_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def lowerCAmelCase__ ( self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
a_ = value
def lowerCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
a_ = kwargs.get("""is_split_into_words""" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def lowerCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
a_ = kwargs.get("""is_split_into_words""" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
a_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=None ):
a_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
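# Minimal usage sketch for the fast tokenizer above (token ids quoted from the
# public roberta-base vocabulary; treat them as illustrative):
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = tok("Hello world")
print(enc["input_ids"])  # typically [0, 31414, 232, 2] -> <s> Hello Ġworld </s>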
| 511 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def a ( A__ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(A__ ):
for j in range(A__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [2_5_5, 2_5_5, 2_5_5] - img[i][j]
return img
if __name__ == "__main__":
# read original image
a_ :List[str] = imread('image_data/lena.jpg', 1)
# convert to its negative
a_ :Optional[Any] = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
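# A vectorized equivalent of the per-pixel loop above: NumPy broadcasts the
# subtraction over the whole array, avoiding the O(rows * cols) Python loop.
import numpy as np

def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    return 255 - img  # assumes an 8-bit image with values in [0, 255]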
| 35 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase = pytest.mark.integration
@require_faiss
class A_ ( A__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowerCamelCase_ ) for x in np.arange(30 ).tolist()]} )
return dset
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
lowerCamelCase__ : int =dset.map(
lambda lowerCamelCase_ , lowerCamelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ )
lowerCamelCase__ : Any =dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase__ , lowerCamelCase__ : int =dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase__ , lowerCamelCase__ : List[str] =dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCamelCase_ ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ , lowerCamelCase__ : Tuple =dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(lowerCamelCase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : Any ={'acknowledged': True}
            mocked_bulk.return_value = [(True, None)] * 30
lowerCamelCase__ : List[Any] ={'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCamelCase__ : Dict =Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] =dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class A_ ( A__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Optional[int] =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase__ : Any =np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Dict =1
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =index.search(lowerCamelCase_ )
self.assertRaises(lowerCamelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase__ : int =np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase__ , lowerCamelCase__ : Dict =index.search_batch(lowerCamelCase_ )
self.assertRaises(lowerCamelCase_ , index.search_batch , queries[0] )
lowerCamelCase__ : List[str] =[scores[0] for scores in total_scores]
lowerCamelCase__ : str =[indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCamelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowerCamelCase_ )
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Optional[int] =FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase__ : Union[str, Any] =FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] =FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Any =faiss.IndexFlat(5 )
lowerCamelCase__ : Any =FaissIndex(custom_index=lowerCamelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
import faiss
lowerCamelCase__ : int =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCamelCase_ ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase__ : Any =FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ : Any =np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : str =1
lowerCamelCase__ , lowerCamelCase__ : Dict =index.search(lowerCamelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCAmelCase_ ( snake_case_ : Dict ) ->int:
import faiss
lowerCamelCase__ : List[str] =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase__ : Optional[int] ='index.faiss'
lowerCamelCase__ : Optional[Any] =f"""mock://{index_name}"""
index.save(snake_case_ , storage_options=mockfs.storage_options )
lowerCamelCase__ : Dict =FaissIndex.load(snake_case_ , storage_options=mockfs.storage_options )
lowerCamelCase__ : List[Any] =np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Union[str, Any] =1
lowerCamelCase__ , lowerCamelCase__ : List[str] =index.search(snake_case_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class A_ ( A__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : Union[str, Any] =Elasticsearch()
lowerCamelCase__ : int ={'acknowledged': True}
lowerCamelCase__ : Optional[Any] =ElasticSearchIndex(es_client=lowerCamelCase_ )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCamelCase__ : Union[str, Any] ='foo'
lowerCamelCase__ : Optional[Any] ={'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : List[Any] =index.search(lowerCamelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase__ : List[str] ='foo'
lowerCamelCase__ : Union[str, Any] ={'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : List[Any] =index.search(lowerCamelCase_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase__ : List[str] =['foo', 'bar', 'foobar']
lowerCamelCase__ : str ={'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =index.search_batch(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =[scores[0] for scores in total_scores]
lowerCamelCase__ : Dict =[indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCamelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCamelCase_ )
# batched queries with timeout
lowerCamelCase__ : str =['foo', 'bar', 'foobar']
lowerCamelCase__ : Any ={'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : Dict =index.search_batch(lowerCamelCase_ , request_timeout=30 )
lowerCamelCase__ : List[str] =[scores[0] for scores in total_scores]
lowerCamelCase__ : int =[indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCamelCase_ ) , 0 )
            self.assertListEqual([1, 1, 1] , lowerCamelCase_ )
| 174 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int:
lowercase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase__ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
lowercase__ = ''
else:
lowercase__ = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[
: config.hidden_size, :
]
lowercase__ = in_proj_bias[: config.hidden_size]
lowercase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ = dct.pop(_SCREAMING_SNAKE_CASE )
lowercase__ = val
def __UpperCamelCase () -> str:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = ViTConfig()
lowercase__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowercase__ = True
lowercase__ = int(vit_name[-12:-10] )
lowercase__ = int(vit_name[-9:-6] )
else:
lowercase__ = 1000
lowercase__ = 'huggingface/label-files'
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = int(vit_name[-6:-4] )
lowercase__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
lowercase__ = 192
lowercase__ = 768
lowercase__ = 12
lowercase__ = 3
elif vit_name[9:].startswith('small' ):
lowercase__ = 384
lowercase__ = 1536
lowercase__ = 12
lowercase__ = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
lowercase__ = 768
lowercase__ = 2304
lowercase__ = 8
lowercase__ = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
lowercase__ = 1024
lowercase__ = 4096
lowercase__ = 24
lowercase__ = 16
elif vit_name[4:].startswith('huge' ):
lowercase__ = 1280
lowercase__ = 5120
lowercase__ = 32
lowercase__ = 16
# load original model from timm
lowercase__ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase__ = timm_model.state_dict()
if base_model:
remove_classification_head_(_SCREAMING_SNAKE_CASE )
lowercase__ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowercase__ = ViTModel(_SCREAMING_SNAKE_CASE ).eval()
else:
lowercase__ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowercase__ = DeiTImageProcessor(size=config.image_size )
else:
lowercase__ = ViTImageProcessor(size=config.image_size )
lowercase__ = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase__ = encoding['pixel_values']
lowercase__ = model(_SCREAMING_SNAKE_CASE )
if base_model:
lowercase__ = timm_model.forward_features(_SCREAMING_SNAKE_CASE )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 )
else:
lowercase__ = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
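# Toy check of the fused-qkv split performed in read_in_q_k_v above: timm stores
# query/key/value as one (3 * hidden, hidden) matrix, and slicing it into thirds
# recovers the three projections losslessly.
import torch

hidden = 4
qkv_w = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q_w = qkv_w[:hidden, :]
k_w = qkv_w[hidden : hidden * 2, :]
v_w = qkv_w[-hidden:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_w)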
| 721 |
import argparse
import json
import subprocess
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowercase__ = []
lowercase__ = (
F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
lowercase__ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
lowercase__ = output.stdout.decode('utf-8' )
lowercase__ = json.loads(_SCREAMING_SNAKE_CASE )
lowercase__ = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_SCREAMING_SNAKE_CASE )
# save the result so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
return values.split(',' )
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 45 | 0 |
'''simple docstring'''
import numpy as np
import datasets
UpperCamelCase_ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
UpperCamelCase_ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
UpperCamelCase_ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float', id='sequence' ), id='X' ),
} ), )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = np.array(A )
SCREAMING_SNAKE_CASE : Tuple = np.array(A )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('Expected `X` to be a 2D vector' )
if len(reference_distribution.shape ) != 2:
raise ValueError('Expected `reference_distribution` to be a 2D vector' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' )
# Get mahalanobis distance for each prediction
SCREAMING_SNAKE_CASE : Any = X - np.mean(A )
SCREAMING_SNAKE_CASE : str = np.cov(reference_distribution.T )
try:
SCREAMING_SNAKE_CASE : str = np.linalg.inv(A )
except np.linalg.LinAlgError:
SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.pinv(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.dot(A, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.dot(A, X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 28 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = RoCBertTokenizer
_snake_case : int = None
_snake_case : Optional[Any] = False
_snake_case : Tuple = True
_snake_case : Union[str, Any] = filter_non_english
def A ( self : List[str] )-> Dict:
super().setUp()
__UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
__UpperCamelCase = {}
__UpperCamelCase = {}
for i, value in enumerate(A_ ):
__UpperCamelCase = i
__UpperCamelCase = i
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def A ( self : Dict )-> Optional[Any]:
__UpperCamelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(A_ , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def A ( self : List[Any] )-> Dict:
__UpperCamelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def A ( self : str )-> Dict:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def A ( self : Union[str, Any] )-> Optional[Any]:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def A ( self : Any )-> Optional[Any]:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def A ( self : int )-> Optional[Any]:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def A ( self : List[Any] )-> int:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def A ( self : Optional[Any] )-> str:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def A ( self : Any )-> int:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def A ( self : List[str] )-> Dict:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def A ( self : int )-> int:
__UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__UpperCamelCase = {}
for i, token in enumerate(A_ ):
__UpperCamelCase = i
__UpperCamelCase = RoCBertWordpieceTokenizer(vocab=A_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def A ( self : int )-> Tuple:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def A ( self : Optional[int] )-> Union[str, Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def A ( self : str )-> str:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def A ( self : List[str] )-> Dict:
__UpperCamelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
__UpperCamelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def A ( self : str )-> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
__UpperCamelCase = tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase = tokenizer_r.do_lower_case if hasattr(A_ , "do_lower_case" ) else False
__UpperCamelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def A ( self : Tuple )-> Union[str, Any]:
__UpperCamelCase = ["的", "人", "有"]
__UpperCamelCase = "".join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase = True
__UpperCamelCase = self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase = False
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def A ( self : List[Any] )-> int:
__UpperCamelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase = tokenizer.encode("你好" , add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode("你是谁" , add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def A ( self : Optional[Any] )-> Tuple:
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__UpperCamelCase = "你好,你是谁"
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase = tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode_plus(A_ , add_special_tokens=A_ )
                self.assertEqual(A_ , A_ )
| 505 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _SCREAMING_SNAKE_CASE ( snake_case__ ):
_A : List[str] = ["""image_processor""", """tokenizer"""]
_A : List[Any] = """BlipImageProcessor"""
_A : Union[str, Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , lowerCamelCase , lowerCamelCase ):
snake_case__ = False
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case__ = self.image_processor
def __call__( self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , **lowerCamelCase , ):
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
snake_case__ = self.tokenizer
snake_case__ = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
return text_encoding
# add pixel_values
snake_case__ = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
if text is not None:
snake_case__ = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
else:
snake_case__ = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase_ )
return encoding_image_processor
def A_ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def A_ ( self ):
snake_case__ = self.tokenizer.model_input_names
snake_case__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
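# Usage sketch for the processor above (the checkpoint name is an assumption):
# text-only inputs are routed to the tokenizer alone, otherwise the tokenizer
# output is merged into the image features.
from PIL import Image
import requests
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']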
| 702 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
snake_case__ , snake_case__ = set(__lowerCAmelCase ), [start]
while stack:
snake_case__ = stack.pop()
explored.add(__lowerCAmelCase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__lowerCAmelCase )
return explored
__magic_name__ = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
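# A recursive equivalent of the iterative traversal above; `reversed()` in the
# iterative version makes the stack pop neighbors in their listed order, so both
# variants visit the same vertex set.
def depth_first_search_recursive(graph: dict, v: str, explored: set | None = None) -> set:
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored

print(depth_first_search_recursive(G, "A"))  # same vertices as the call above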
| 530 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 204 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : List[str] = logging.get_logger(__name__)
__magic_name__ : Tuple = {
    'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class __snake_case (lowerCamelCase ):
__a = '''encodec'''
def __init__( self: Union[str, Any] , A_: Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , A_: Any=2_40_00 , A_: List[Any]=1 , A_: Optional[Any]=False , A_: Optional[int]=None , A_: int=None , A_: List[str]=1_28 , A_: Union[str, Any]=32 , A_: List[str]=1 , A_: Dict=[8, 5, 4, 2] , A_: List[Any]="weight_norm" , A_: Any=7 , A_: List[Any]=7 , A_: Tuple=3 , A_: List[str]=2 , A_: Optional[Any]=True , A_: Optional[int]="reflect" , A_: Dict=2 , A_: Union[str, Any]=2 , A_: Union[str, Any]=1.0 , A_: List[str]=10_24 , A_: str=None , A_: List[str]=True , **A_: int , ):
__lowerCamelCase = target_bandwidths
__lowerCamelCase = sampling_rate
__lowerCamelCase = audio_channels
__lowerCamelCase = normalize
__lowerCamelCase = chunk_length_s
__lowerCamelCase = overlap
__lowerCamelCase = hidden_size
__lowerCamelCase = num_filters
__lowerCamelCase = num_residual_layers
__lowerCamelCase = upsampling_ratios
__lowerCamelCase = norm_type
__lowerCamelCase = kernel_size
__lowerCamelCase = last_kernel_size
__lowerCamelCase = residual_kernel_size
__lowerCamelCase = dilation_growth_rate
__lowerCamelCase = use_causal_conv
__lowerCamelCase = pad_mode
__lowerCamelCase = compress
__lowerCamelCase = num_lstm_layers
__lowerCamelCase = trim_right_ratio
__lowerCamelCase = codebook_size
__lowerCamelCase = codebook_dim if codebook_dim is not None else hidden_size
__lowerCamelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' )
super().__init__(**A_ )
@property
def __a ( self: Union[str, Any] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __a ( self: List[Any] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __a ( self: List[Any] ):
__lowerCamelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __a ( self: List[Any] ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
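# Worked arithmetic for the derived properties above, using the 24 kHz defaults:
import math
import numpy as np

upsampling_ratios = [8, 5, 4, 2]
hop_length = int(np.prod(upsampling_ratios))             # 320 samples per frame
frame_rate = math.ceil(24_000 / hop_length)              # 75 frames per second
num_quantizers = int(1_000 * 24.0 // (frame_rate * 10))  # 32 codebooks at 24 kbps
print(hop_length, frame_rate, num_quantizers)            # 320 75 32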
| 281 | 0 |
from functools import lru_cache
@lru_cache
def lowerCamelCase__ ( a : int ) -> int:
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
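# The cache memoizes across calls: factorial(100) fills entries for 1..100, so a
# later factorial(101) costs one extra multiplication plus a single cache hit.
factorial(100)
factorial(101)
print(factorial.cache_info())  # CacheInfo(hits=1, misses=101, maxsize=128, currsize=101)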
| 373 |
import os
import time
import numpy as np
import onnxruntime as ort
snake_case__ = '''1'''
snake_case__ = '''0'''
snake_case__ = '''1'''
snake_case__ = ort.SessionOptions()
snake_case__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
snake_case__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
snake_case__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
snake_case__ = ort.RunOptions()
snake_case__ = 128
snake_case__ = 1
snake_case__ = np.ones((batch, sequence), dtype=np.intaa)
snake_case__ = np.ones((batch, sequence), dtype=np.intaa)
snake_case__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
snake_case__ = time.time()
snake_case__ = 2000
snake_case__ = {}
for iter in range(max_iters):
snake_case__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
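# In runnable form, the three flag strings at the top of this file would be set
# as ONNX Runtime TensorRT execution-provider environment variables; the exact
# variable names below are assumptions, not taken from the file:
import os

os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"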
| 373 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester :
    def __init__( self : Tuple , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self : int ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Any ):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model( self : Optional[int] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : str , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : str , ):
lowerCamelCase_ : Optional[int] =True
lowerCamelCase_ : Optional[int] =OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : List[Any] =model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
lowerCamelCase_ : int =model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
lowerCamelCase_ : int =model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : Dict , snake_case__ : str , ):
lowerCamelCase_ : List[str] =OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : Any =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Tuple , ):
lowerCamelCase_ : int =True
lowerCamelCase_ : Tuple =True
lowerCamelCase_ : List[str] =OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
lowerCamelCase_ : str =model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
lowerCamelCase_ : str =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase_ : Dict =ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ : Union[str, Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase_ : str =torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ : List[str] =torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase_ : Optional[int] =model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0]
lowerCamelCase_ : Optional[Any] =model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0]
# select random slice
lowerCamelCase_ : str =ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ : Any =output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ : Union[str, Any] =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self : List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self : Tuple ):
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config( self : int ):
self.config_tester.run_common_tests()
    def test_model( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self : str ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_open_llama_sequence_classification_model( self : Tuple ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self : str ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self : int ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
    def test_save_load_fast_init_from_base( self : Any ):
        pass
@parameterized.expand([("linear",), ("dynamic",)] )
    def test_model_rope_scaling( self : List[Any] , scaling_type : List[Any] ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
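# --- Added illustration (synthetic tensors, not a transformers test) ---
# The cache tests above compare a random slice of hidden states computed with and
# without past_key_values; this is the comparison idiom in isolation.
if is_torch_available():
    _no_past = torch.randn(2, 8, 16)             # full-sequence hidden states
    _with_past = _no_past[:, -3:, :].clone()     # "cached" run covering the last 3 tokens
    _idx = torch.randint(0, 16, (1,)).item()
    assert torch.allclose(_no_past[:, -3:, _idx], _with_past[:, :, _idx], atol=1e-3)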
| 153 |
"""simple docstring"""
def rank_of_matrix( matrix : list[list[int | float]] ) -> int:
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
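    # --- Added worked example (illustrative only) ---
    # The third row equals the sum of the first two, so only two rows are independent.
    example = [
        [1.0, 2.0, 3.0],
        [4.0, 5.0, 6.0],
        [5.0, 7.0, 9.0],
    ]
    assert rank_of_matrix(example) == 2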
| 153 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'pix2struct_text_model'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , vocab_size=5_02_44 , hidden_size=7_68 , d_kv=64 , d_ff=20_48 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
@classmethod
def _lowerCamelCase ( cls , _snake_case , **_snake_case ):
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case )
__lowerCamelCase , __lowerCamelCase = cls.get_config_dict(_snake_case , **_snake_case )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
__lowerCamelCase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_snake_case , **_snake_case )
class Pix2StructVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'pix2struct_vision_model'
    def __init__( self , hidden_size=7_68 , patch_embed_hidden_size=7_68 , d_ff=20_48 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1E-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1E-10 , initializer_factor=1.0 , seq_len=40_96 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
@classmethod
def _lowerCamelCase ( cls , _snake_case , **_snake_case ):
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case )
__lowerCamelCase , __lowerCamelCase = cls.get_config_dict(_snake_case , **_snake_case )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
__lowerCamelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_snake_case , **_snake_case )
class Pix2StructConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'pix2struct'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.0_2 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        """simple docstring"""
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
        self.text_config = Pix2StructTextConfig(**text_config )
        self.vision_config = Pix2StructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
@classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
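# --- Added illustration (invented _Inner/_Outer names, no transformers dependency) ---
# The composite config above serializes itself by recursing into its sub-configs;
# this is the nested to_dict pattern in miniature.
class _Inner:
    def __init__(self, size=8):
        self.size = size
    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class _Outer:
    model_type = 'composite'
    def __init__(self):
        self.inner = _Inner()
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['inner'] = self.inner.to_dict()  # recurse into the sub-config
        output['model_type'] = self.__class__.model_type
        return output

assert _Outer().to_dict() == {'inner': {'size': 8}, 'model_type': 'composite'}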
| 575 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_UpperCamelCase : List[Any] =logging.get_logger(__name__)
def make_batched( videos ):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
class VivitImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 2_55 , offset = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 2_56}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case = PILImageResampling.BILINEAR , _snake_case = None , **_snake_case , ):
"""simple docstring"""
__lowerCamelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
if "shortest_edge" in size:
__lowerCamelCase = get_resize_output_image_size(_snake_case , size['''shortest_edge'''] , default_to_square=_snake_case )
elif "height" in size and "width" in size:
__lowerCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
__lowerCamelCase = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case , size=(size['''height'''], size['''width''']) , data_format=_snake_case , **_snake_case )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case = True , _snake_case = None , **_snake_case , ):
"""simple docstring"""
        image = image.astype(np.float32 )
if offset:
__lowerCamelCase = image - (scale / 2)
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def _lowerCamelCase ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , ):
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
__lowerCamelCase = to_numpy_array(_snake_case )
if do_resize:
__lowerCamelCase = self.resize(image=_snake_case , size=_snake_case , resample=_snake_case )
if do_center_crop:
__lowerCamelCase = self.center_crop(_snake_case , size=_snake_case )
if do_rescale:
__lowerCamelCase = self.rescale(image=_snake_case , scale=_snake_case , offset=_snake_case )
if do_normalize:
__lowerCamelCase = self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case )
__lowerCamelCase = to_channel_dimension_format(_snake_case , _snake_case )
return image
def _lowerCamelCase ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = offset if offset is not None else self.offset
__lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase = image_std if image_std is not None else self.image_std
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
__lowerCamelCase = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase = get_size_dict(_snake_case , param_name='''crop_size''' )
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
__lowerCamelCase = make_batched(_snake_case )
__lowerCamelCase = [
[
self._preprocess_image(
image=_snake_case , do_resize=_snake_case , size=_snake_case , resample=_snake_case , do_center_crop=_snake_case , crop_size=_snake_case , do_rescale=_snake_case , rescale_factor=_snake_case , offset=_snake_case , do_normalize=_snake_case , image_mean=_snake_case , image_std=_snake_case , data_format=_snake_case , )
for img in video
]
for video in videos
]
__lowerCamelCase = {'''pixel_values''': videos}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
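# --- Added numeric check (illustrative only) ---
# Per-channel normalization as used in the pipeline above, with the 0.5 values of
# IMAGENET_STANDARD_MEAN / IMAGENET_STANDARD_STD: (pixel - mean) / std maps 0.5 to 0.
_image = np.full((3, 2, 2), 0.5, dtype=np.float32)   # channels-first dummy frame in [0, 1]
_mean = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)
_std = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)
assert np.allclose((_image - _mean) / _std, 0.0)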
| 575 | 1 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer( BaseTokenizer):
    def __init__( self : List[str] , replacement : str = "▁" , add_prefix_space : bool = True , unk_token : Union[str, AddedToken] = "<unk>" , eos_token : Union[str, AddedToken] = "</s>" , pad_token : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}" ) , " " ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train( self : Union[str, Any] , files : Union[str, List[str]] , vocab_size : int = 8000 , show_progress : bool = True , ) -> Any:
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()
    def train_from_iterator( self : Optional[Any] , iterator : Union[Iterator[str], Iterator[Iterator[str]]] , vocab_size : int = 8000 , show_progress : bool = True , ) -> str:
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self : Optional[int] ) -> Any:
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["""model"""]["""unk_id"""] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
| 449 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    """simple docstring"""
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("UTF-8" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" " ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = "\n".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f''': {x}: ''' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
    return selected_warnings
def extract_warnings( output_dir , targets ):
    """simple docstring"""
    selected_warnings = set()
    paths = [os.path.join(output_dir , p ) for p in os.listdir(output_dir ) if (p.endswith(".zip" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str( values ):
        """simple docstring"""
        return values.split("," )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
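    # --- Added illustration of the filtering rule (not part of the original script) ---
    # parse_line keeps a collected warning only when it mentions one of the target
    # categories in ": <Category>: " form.
    _demo_targets = ['DeprecationWarning', 'UserWarning', 'FutureWarning']
    _demo_warning = 'src/foo.py:12: DeprecationWarning: bar is deprecated'
    assert any(f': {x}: ' in _demo_warning for x in _demo_targets)
    assert not any(f': {x}: ' in 'src/foo.py:13: SyntaxWarning: baz' for x in _demo_targets)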
| 449 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar('T')
class DisjointSetTreeNode(Generic[T] ):
    """simple docstring"""
    def __init__( self : Dict , data : T ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T] ):
    """simple docstring"""
    def __init__( self : Any ) -> None:
        # map from node name to the node object
        self.map : dict[T, DisjointSetTreeNode[T]] = {}
    def make_set( self : Dict , data : T ) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )
    def find_set( self : Optional[int] , data : T ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link( self : List[Any] , node1 : DisjointSetTreeNode[T] , node2 : DisjointSetTreeNode[T] ) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1
    def union( self : Optional[int] , data1 : T , data2 : T ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )
class GraphUndirectedWeighted(Generic[T] ):
    """simple docstring"""
    def __init__( self : int ) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections : dict[T, dict[T, int]] = {}
    def add_node( self : str , node : T ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge( self : Dict , node1 : T , node2 : T , weight : int ) -> None:
        # add an edge with the given weight
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal( self : Optional[Any] ) -> GraphUndirectedWeighted[T]:
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
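# --- Added usage sketch (assumes the method names restored above) ---
# The heaviest edge of the triangle is left out of the minimum spanning tree.
_g = GraphUndirectedWeighted[int]()
_g.add_edge(1, 2, 1)
_g.add_edge(2, 3, 2)
_g.add_edge(1, 3, 10)
_mst = _g.kruskal()
assert 3 not in _mst.connections[1]  # the weight-10 edge 1-3 was dropped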
| 421 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0XE000
SEP = 0XE001
BOS = 0XE002
MASK = 0XE003
RESERVED = 0XE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : Any , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2_0_4_8 , **kwargs : Optional[int] , ) -> Optional[Any]:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
    def vocab_size( self : str ) -> int:
        return self._unicode_vocab_size
    def _tokenize( self : List[str] , text : str ) -> List[str]:
        return list(text )
    def _convert_token_to_id( self : Union[str, Any] , token : str ) -> int:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"""invalid token: '{token}'""" )
    def _convert_id_to_token( self : List[str] , index : int ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"""invalid id: {index}""" )
    def convert_tokens_to_string( self : List[Any] , tokens : List[str] ) -> str:
        return "".join(tokens )
    def build_inputs_with_special_tokens( self : List[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ):
        return ()
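# --- Added illustration (plain Python, relies only on the constants above) ---
# CANINE-style tokenization is just Unicode codepoints: ord() encodes, chr() decodes,
# and the special tokens live in the Private Use Area (e.g. CLS at 0xE000).
_text = 'héllo'
_ids = [ord(c) for c in _text]
assert ''.join(chr(i) for i in _ids) == _text
assert chr(CLS) not in _text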
| 421 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images , 50 )  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }
    html = requests.get('https://www.google.com/search' , params=params , headers=headers )
    soup = BeautifulSoup(html.text , 'html.parser' )
    matched_images_data = ''.join(
        re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
    matched_images_data_fix = json.dumps(matched_images_data )
    matched_images_data_json = json.loads(matched_images_data_fix )
    matched_google_image_data = re.findall(
        R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , matched_images_data_json , )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(matched_google_image_data ) , )
    matched_google_full_resolution_images = re.findall(
        R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , removed_matched_google_images_thumbnails , )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images ):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image , 'ascii' ).decode(
            'unicode-escape' )
        original_size_img = bytes(original_size_img_not_fixed , 'ascii' ).decode(
            'unicode-escape' )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(opener )
        path_name = f'''query_{query.replace(' ' , '_' )}'''
        if not os.path.exists(path_name ):
            os.makedirs(path_name )
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img , f'''{path_name}/original_size_img_{index}.jpg''' )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print('''Please provide a search term.''')
raise
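# --- Added illustration of the double unicode-escape decode used above ---
# Doubly escaped URL fragments from the page source become plain URLs after two passes.
_doubly_escaped = 'https://example.com/a\\\\u0026b'  # runtime value ends in two backslashes + u0026
_once = bytes(_doubly_escaped, 'ascii').decode('unicode-escape')
_twice = bytes(_once, 'ascii').decode('unicode-escape')
assert _twice == 'https://example.com/a&b'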
| 304 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_informer'''] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
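# --- Added generic sketch of the lazy-import idea (invented _LazyAttr name, illustration only) ---
# _LazyModule above defers heavy imports until an attribute is first accessed;
# the same pattern with importlib in miniature:
import importlib

class _LazyAttr:
    def __init__(self, module_name, attr):
        self.module_name, self.attr = module_name, attr
    def resolve(self):
        # the target module is only imported here, on first use
        return getattr(importlib.import_module(self.module_name), self.attr)

assert _LazyAttr('pathlib', 'Path').resolve()('/tmp').name == 'tmp'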
| 304 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
MAX_MODEL_INPUT_SIZES = {
"facebook/s2t-small-librispeech-asr": 1024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self : Optional[int] , vocab_file : str , spm_file : str , bos_token : str="<s>" , eos_token : str="</s>" , pad_token : str="<pad>" , unk_token : str="<unk>" , do_upper_case : bool=False , do_lower_case : bool=False , tgt_lang : Optional[str]=None , lang_codes : Optional[str]=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Any , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
@property
    def vocab_size( self : List[str] ) -> int:
        return len(self.encoder )
    @property
    def tgt_lang( self : Dict ) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang( self : str , new_tgt_lang : str ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self : List[Any] , tgt_lang : str ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self : str , text : str ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self : Union[str, Any] , token : str ):
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self : List[Any] , index : int ):
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self : Union[str, Any] , tokens : List[str] ) -> str:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self : Dict , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self : List[Any] ) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : Tuple ) -> Dict:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self : Tuple , d : Dict ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        spm_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ) -> Union[Dict, List]:
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json( data , path : str ) -> None:
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
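# --- Added illustration (made-up ids, not from the tokenizer above) ---
# Shape of the special-tokens mask built by get_special_tokens_mask: ones for the
# language-prefix token and the trailing </s>, zeros for ordinary text tokens.
_prefix_tokens = [90]      # hypothetical <lang:fr>-style id
_token_ids = [5, 6, 7]
_mask = [1] * len(_prefix_tokens) + [0] * len(_token_ids) + [1]
assert _mask == [1, 0, 0, 0, 1]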
| 708 |
'''simple docstring'''
def compute_ap( l ) -> None: # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
__a = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
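# --- Added brute-force cross-check (illustration; reuses `data` from above) ---
# A vertex is an articulation point exactly when removing it increases the number
# of connected components.
def _components(adj, removed):
    seen, count = {removed}, 0
    for s in adj:
        if s in seen:
            continue
        count += 1
        stack = [s]
        while stack:
            u = stack.pop()
            if u in seen:
                continue
            seen.add(u)
            stack.extend(w for w in adj[u] if w != removed)
    return count

assert [v for v in data if _components(data, v) > _components(data, None)] == [2, 3, 5]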
| 301 | 0 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]
__version__ = '3.0.12'
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(OSError ):
    def __init__( self , lock_file) -> Dict:
        self.lock_file = lock_file
return None
def __str__( self) -> Tuple:
        temp = F"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class _Acquire_ReturnProxy :
    def __init__( self , lock) -> Union[str, Any]:
        self.lock = lock
return None
def __enter__( self) -> Dict:
return self.lock
def __exit__( self , lowercase_ , lowercase_ , lowercase_) -> int:
self.lock.release()
return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None
    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock

elif fcntl:
    FileLock = UnixFileLock

else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
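

# Usage sketch (illustrative path; not part of the original module):
def _demo_file_lock() -> None:
    lock = FileLock("demo.txt.lock", timeout=5)
    with lock:  # acquired here, released on exit
        with open("demo.txt", "a") as fh:
            fh.write("exclusive write\n")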
| 20 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
lowerCamelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCamelCase : Tuple =parser.parse_args()
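    # Example invocation (illustrative paths; the flags are the ones defined above):
    #   python convert_blip_original_pytorch_to_hf.py \
    #       --pytorch_dump_folder_path ./blip-base-hf --config_path ./blip_config.json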
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 228 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
SCREAMING_SNAKE_CASE__ :List[str] = {'input_ids': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE__, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences,
        )
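        # Note: the "<0xC3>" and "<0xA9>" pieces in the expectations above are
        # SentencePiece byte-fallback tokens; 0xC3 0xA9 is the UTF-8 encoding of
        # "é", which is how characters missing from the vocabulary are encoded.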
| 720 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create n-grams as character substrings of a sentence.

    >>> create_ngram("abcde", 3)
    ['abc', 'bcd', 'cde']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
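    # Example: character-level bigrams
    #   create_ngram("hello", 2) -> ['he', 'el', 'll', 'lo']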
| 320 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
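

# Usage sketch (argument values are illustrative, not the defaults above):
def _demo_deit_config() -> None:
    config = DeiTConfig(hidden_size=192, num_attention_heads=3)
    print(config.model_type, config.hidden_size, config.num_attention_heads)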
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 505 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
    )
| 505 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 708 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
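

# Usage sketch: a minimal denoising loop with a stand-in model (values illustrative):
def _demo_ipndm_loop() -> None:
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(4)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # a real UNet prediction goes here
        sample = scheduler.step(model_output, t, sample).prev_sample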
| 34 | 0 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 245 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
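    # Note: the fast test above can be run in isolation with, e.g.,
    #   pytest -k test_kandinsky_controlnet <path-to-this-file>
    # while the integration test additionally needs RUN_SLOW=1 and a CUDA GPU.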
| 603 | 0 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        init_latents = image
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
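

# Usage sketch (model id and strength are illustrative, not from the original file):
#   pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-cifar10-32")
#   images, noising_timestep = pipe(image=pil_image, strength=0.6, return_dict=False)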
| 714 |
# Imports
import numpy as np
class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
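

# Usage sketch: NDVI on tiny synthetic bands (values illustrative):
def _demo_ndvi() -> None:
    red = np.array([[50.0, 60.0], [70.0, 80.0]])
    nir = np.array([[200.0, 210.0], [220.0, 230.0]])
    cl = IndexCalculation(red=red, nir=nir)
    print(cl.calculation("NDVI"))  # elementwise (nir - red) / (nir + red)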
| 75 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images
@require_vision
@require_torch
class ImageGPTImageProcessorTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
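

# Sketch of the nearest-cluster quantization ImageGPT relies on, in miniature
# (palette and pixel values are illustrative):
def _demo_nearest_cluster() -> None:
    clusters = np.array([[0.9, 0.7, 0.4], [-0.6, 0.0, 0.5]])
    pixel = np.array([0.8, 0.6, 0.5])
    token_id = int(np.argmin(((clusters - pixel) ** 2).sum(axis=1)))
    print(token_id)  # 0: the first palette entry is closest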
| 346 |
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
UpperCamelCase_ , UpperCamelCase_ = 10, 9
UpperCamelCase_ = defaultdict(list)
UpperCamelCase_ = {}
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
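# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the same even-tree count,
# written iteratively so it does not depend on Python's recursion limit.
# All names below (count_even_cuts, adjacency, ...) are illustrative.
def count_even_cuts(edge_list):
    adjacency = defaultdict(list)
    for a, b in edge_list:
        adjacency[a].append(b)
        adjacency[b].append(a)
    parent = {1: None}
    order = []
    stack = [1]
    while stack:  # iterative DFS, recording a root-first visit order
        node = stack.pop()
        order.append(node)
        for nxt in adjacency[node]:
            if nxt not in parent:
                parent[nxt] = node
                stack.append(nxt)
    subtree_size = dict.fromkeys(order, 1)
    removable = 0
    for node in reversed(order):  # children are processed before their parents
        if parent[node] is not None:
            subtree_size[parent[node]] += subtree_size[node]
            if subtree_size[node] % 2 == 0:
                removable += 1  # cutting this edge leaves an even component
    return removable
# The sample above has answer 2, matching print(len(cuts) - 1).
assert count_even_cuts([(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]) == 2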
| 92 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    r"""
    Image processor in the LeViT style: aspect-preserving shortest-edge resize
    (inflated by 256/224), center crop, rescale, and normalization.
    """
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
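# ---------------------------------------------------------------------------
# Hedged sketch (illustrative numbers, not part of the original file): the
# resize rule above inflates the requested shortest edge by 256/224 before
# the aspect-preserving resize, so a 224 center crop comes out of a 256 side.
shortest_edge = int((256 / 224) * 224)  # -> 256
h, w = 480, 640                          # a made-up input image size
scale = shortest_edge / min(h, w)
print(shortest_edge, (round(h * scale), round(w * scale)))  # 256 (256, 341)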
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
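# ---------------------------------------------------------------------------
# Hedged usage sketch: this module lives inside the transformers package;
# from outside it, the equivalent import would be
# `from transformers import GPTNeoXJapaneseConfig` (recent releases).
config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=12)
print(config.vocab_size, config.rotary_pct)  # 32000 1.0 (checkpoint defaults)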
| 515 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class A__ :
def __init__( self , UpperCamelCase__ , ) -> Dict:
'''simple docstring'''
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 32
A_ = 2
A_ = 4
A_ = 37
A_ = """gelu"""
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = None
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = TFEsmModel(config=UpperCamelCase__ )
A_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A_ = model(UpperCamelCase__ )
A_ = [input_ids, input_mask]
A_ = model(UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
A_ = True
A_ = TFEsmModel(config=UpperCamelCase__ )
A_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
A_ = model(UpperCamelCase__ )
A_ = [input_ids, input_mask]
A_ = model(UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ )
# Also check the case where encoder outputs are not passed
A_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = TFEsmForMaskedLM(config=UpperCamelCase__ )
A_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = TFEsmForTokenClassification(config=UpperCamelCase__ )
A_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = TFEsmModelTester(self )
A_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFEsmModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class A__ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCamelCase__ )[0]
A_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , UpperCamelCase__ )
# compare the actual values for a slice.
A_ = tf.constant(
[
[
[8.921518, -10.589814, -6.4671307],
[-6.3967156, -13.911377, -1.1211915],
[-7.781247, -13.951557, -3.740592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
A_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A_ = model(UpperCamelCase__ )[0]
# compare the actual values for a slice.
A_ = tf.constant(
[
[
[0.14443092, 0.54125327, 0.3247739],
[0.30340484, 0.00526676, 0.31077722],
[0.32278043, -0.24987096, 0.3414628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
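# ---------------------------------------------------------------------------
# Hedged sketch mirroring the @slow integration test above (needs network and
# TensorFlow): ESM-2 uses a 33-token vocabulary, hence the last dimension.
if __name__ == "__main__" and is_tf_available():
    mlm = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
    mlm_logits = mlm(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(mlm_logits.shape)  # (1, 6, 33)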
| 288 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=12 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=0 , UpperCamelCase__=None , ) -> Union[str, Any]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = projection_dim
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = dropout
A_ = attention_dropout
A_ = max_position_embeddings
A_ = initializer_range
A_ = scope
A_ = bos_token_id
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = TFBlipTextModel(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , training=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( _snake_case , unittest.TestCase ):
lowercase = (TFBlipTextModel,) if is_tf_available() else ()
lowercase = False
lowercase = False
lowercase = False
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = BlipTextModelTester(self )
A_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
pass
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFBlipTextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase__ )
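# ---------------------------------------------------------------------------
# Hedged sketch mirroring the @slow hub test above (needs network): the BLIP
# text tower loads standalone from the first archive entry.
if __name__ == "__main__" and is_tf_available():
    blip_text = TFBlipTextModel.from_pretrained(TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    print(type(blip_text).__name__)  # TFBlipTextModel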
| 288 | 1 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A = True
except ImportError:
A = False
try:
from torch.hub import _get_torch_home
A = _get_torch_home()
except ImportError:
A = os.path.expanduser(
os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
)
A = os.path.join(torch_cache_home, '''transformers''')
A = '''https://cdn.huggingface.co'''
A = '''https://s3.amazonaws.com/models.huggingface.co/bert'''
A = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1])
A = os.path.join(PATH, '''config.yaml''')
A = os.path.join(PATH, '''attributes.txt''')
A = os.path.join(PATH, '''objects.txt''')
A = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
A = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
A = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
A = '''pytorch_model.bin'''
A = '''config.yaml'''
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def __A ( a_ :Union[str, Any]) -> Any:
__a : List[str] = OrderedDict()
with open(a_ , '''rb''') as f:
__a : Optional[int] = pkl.load(a_)['''model''']
for k in copy.deepcopy(list(ckp.keys())):
__a : str = ckp.pop(a_)
if isinstance(a_ , np.ndarray):
__a : List[str] = torch.tensor(a_)
else:
assert isinstance(a_ , torch.tensor), type(a_)
__a : int = v
return r
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = {}
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "root" , _UpperCAmelCase=0 ):
__a : Tuple = name
__a : Dict = level
__a : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__a : Optional[int] = copy.deepcopy(_UpperCAmelCase )
__a : str = copy.deepcopy(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = Config(_UpperCAmelCase , name=_UpperCAmelCase , level=level + 1 )
__a : int = v
setattr(self , _UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = val
__a : Optional[Any] = val
__a : List[str] = key.split('''.''' )
__a : Union[str, Any] = len(_UpperCAmelCase ) - 1
__a : Any = self._pointer
if len(_UpperCAmelCase ) > 1:
for i, l in enumerate(_UpperCAmelCase ):
if hasattr(self , _UpperCAmelCase ) and isinstance(getattr(self , _UpperCAmelCase ) , _UpperCAmelCase ):
setattr(getattr(self , _UpperCAmelCase ) , '''.'''.join(levels[i:] ) , _UpperCAmelCase )
if l == last_level:
__a : str = val
else:
__a : Any = pointer[l]
def _lowerCamelCase ( self ):
return self._pointer
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
with open(f"""{file_name}""" , '''w''' ) as stream:
dump(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
with open(f"""{file_name}""" , '''w''' ) as stream:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase ):
with open(_UpperCAmelCase ) as stream:
__a : int = load(_UpperCAmelCase , Loader=_UpperCAmelCase )
return data
def __str__( self ):
__a : Tuple = ''' '''
if self._name != "root":
__a : Union[str, Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__a : Optional[Any] = ''''''
__a : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(_UpperCAmelCase ).__name__})\n"""
__a : Any = level
return r[:-1]
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
__a : Tuple = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
return cls(_UpperCAmelCase )
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
__a : Dict = kwargs.pop('''cache_dir''' , _UpperCAmelCase )
__a : int = kwargs.pop('''force_download''' , _UpperCAmelCase )
__a : Tuple = kwargs.pop('''resume_download''' , _UpperCAmelCase )
__a : Dict = kwargs.pop('''proxies''' , _UpperCAmelCase )
__a : Tuple = kwargs.pop('''local_files_only''' , _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
__a : List[str] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
elif os.path.isfile(_UpperCAmelCase ) or is_remote_url(_UpperCAmelCase ):
__a : Optional[int] = pretrained_model_name_or_path
else:
__a : List[str] = hf_bucket_url(_UpperCAmelCase , filename=_UpperCAmelCase , use_cdn=_UpperCAmelCase )
try:
# Load from URL or cache if already cached
__a : List[str] = cached_path(
_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__a : Tuple = Config.load_yaml(_UpperCAmelCase )
except EnvironmentError:
__a : Tuple = '''Can\'t load config for'''
raise EnvironmentError(_UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(_UpperCAmelCase ), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape, na[0, 0, :5])
    print(nb.shape, nb[0, 0, :5])
    assert np.allclose(na, nb, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(na, nb, rtol=0.01, atol=0.1).flatten() if x is False]) / len(na.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    # quirk kept from the source: raises even when the tensors match
    raise Exception("tensors are all good")
# Hugging face functions below
def __A ( a_ :List[Any]) -> List[Any]:
__a : List[str] = urlparse(a_)
return parsed.scheme in ("http", "https")
def __A ( a_ :str , a_ :str , a_ :str=True) -> str:
__a : Any = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__a : Optional[int] = '''/''' not in model_id
if legacy_format:
return F"""{endpoint}/{model_id}-{filename}"""
else:
return F"""{endpoint}/{model_id}/{filename}"""
def __A ( a_ :Optional[Any] , a_ :Optional[Any] , a_ :List[str]=None , a_ :Optional[Any]=0 , a_ :Optional[Any]=None , ) -> int:
__a : List[str] = '''python/{}'''.format(sys.version.split()[0])
if _torch_available:
ua += "; torch/{}".format(torch.__version__)
if isinstance(a_ , a_):
ua += "; " + "; ".join('''{}/{}'''.format(a_ , a_) for k, v in user_agent.items())
elif isinstance(a_ , a_):
ua += "; " + user_agent
__a : Optional[Any] = {'''user-agent''': ua}
if resume_size > 0:
__a : Dict = '''bytes=%d-''' % (resume_size,)
__a : Dict = requests.get(a_ , stream=a_ , proxies=a_ , headers=a_)
if response.status_code == 4_16: # Range not satisfiable
return
__a : Tuple = response.headers.get('''Content-Length''')
__a : Dict = resume_size + int(a_) if content_length is not None else None
__a : Optional[int] = tqdm(
unit='''B''' , unit_scale=a_ , total=a_ , initial=a_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=10_24):
if chunk: # filter out keep-alive new chunks
progress.update(len(a_))
temp_file.write(a_)
progress.close()
def __A ( a_ :Dict , a_ :Tuple=None , a_ :Optional[Any]=False , a_ :List[Any]=None , a_ :Union[str, Any]=10 , a_ :Union[str, Any]=False , a_ :List[Any]=None , a_ :Dict=False , ) -> Any:
if cache_dir is None:
__a : int = TRANSFORMERS_CACHE
if isinstance(a_ , a_):
__a : List[str] = str(a_)
os.makedirs(a_ , exist_ok=a_)
__a : List[str] = None
if not local_files_only:
try:
__a : int = requests.head(a_ , allow_redirects=a_ , proxies=a_ , timeout=a_)
if response.status_code == 2_00:
__a : Any = response.headers.get('''ETag''')
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__a : Any = url_to_filename(a_ , a_)
# get cache path to put the file
__a : Tuple = os.path.join(a_ , a_)
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(a_):
return cache_path
else:
__a : Tuple = [
file
for file in fnmatch.filter(os.listdir(a_) , filename + '''.*''')
if not file.endswith('''.json''') and not file.endswith('''.lock''')
]
if len(a_) > 0:
return os.path.join(a_ , matching_files[-1])
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''')
return None
# From now on, etag is not None.
if os.path.exists(a_) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__a : Dict = cache_path + '''.lock'''
with FileLock(a_):
# If the download just completed while the lock was activated.
if os.path.exists(a_) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__a : Optional[int] = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(a_ , '''a+b''') as f:
yield f
__a : Optional[int] = _resumable_file_manager
if os.path.exists(a_):
__a : Optional[int] = os.stat(a_).st_size
else:
__a : Any = 0
else:
__a : List[str] = partial(tempfile.NamedTemporaryFile , dir=a_ , delete=a_)
__a : int = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , a_ , temp_file.name , )
http_get(
a_ , a_ , proxies=a_ , resume_size=a_ , user_agent=a_ , )
os.replace(temp_file.name , a_)
__a : Dict = {'''url''': url, '''etag''': etag}
__a : Optional[Any] = cache_path + '''.json'''
with open(a_ , '''w''') as meta_file:
json.dump(a_ , a_)
return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
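# ---------------------------------------------------------------------------
# Hedged sketch: the cache key is sha256(url), plus sha256(etag) when an ETag
# is known (the URL and ETag below are made up).
print(url_to_filename("https://example.com/model.bin", etag="abc123")[:16], "...")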
def __A ( a_ :Optional[Any] , a_ :Optional[int]=None , a_ :Tuple=False , a_ :Optional[int]=None , a_ :Union[str, Any]=False , a_ :Tuple=None , a_ :List[Any]=False , a_ :str=False , a_ :List[Any]=False , ) -> Any:
if cache_dir is None:
__a : Dict = TRANSFORMERS_CACHE
if isinstance(a_ , a_):
__a : int = str(a_)
if isinstance(a_ , a_):
__a : List[Any] = str(a_)
if is_remote_url(a_):
# URL, so get it from the cache (downloading if necessary)
__a : List[str] = get_from_cache(
a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , user_agent=a_ , local_files_only=a_ , )
elif os.path.exists(a_):
# File, and it exists.
__a : Any = url_or_filename
elif urlparse(a_).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(a_))
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(a_))
if extract_compressed_file:
if not is_zipfile(a_) and not tarfile.is_tarfile(a_):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__a : List[str] = os.path.split(a_)
__a : Optional[Any] = output_file.replace('''.''' , '''-''') + '''-extracted'''
__a : Optional[int] = os.path.join(a_ , a_)
if os.path.isdir(a_) and os.listdir(a_) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__a : List[str] = output_path + '''.lock'''
with FileLock(a_):
shutil.rmtree(a_ , ignore_errors=a_)
os.makedirs(a_)
if is_zipfile(a_):
with ZipFile(a_ , '''r''') as zip_file:
zip_file.extractall(a_)
zip_file.close()
elif tarfile.is_tarfile(a_):
__a : Dict = tarfile.open(a_)
tar_file.extractall(a_)
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(a_))
return output_path_extracted
return output_path
def __A ( a_ :Tuple , a_ :Optional[int]=",") -> Optional[Any]:
assert isinstance(a_ , a_)
if os.path.isfile(a_):
with open(a_) as f:
__a : Any = eval(f.read())
else:
__a : Tuple = requests.get(a_)
try:
__a : int = requests.json()
except Exception:
__a : Optional[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__a : Optional[int] = eval(a_)
except Exception:
__a : Any = data.split('''\n''')
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def __A ( a_ :Dict) -> Dict:
__a : List[Any] = url.split('''/''')[-1]
if fn not in os.listdir(os.getcwd()):
wget.download(a_)
with open(a_ , '''rb''') as stream:
__a : Optional[int] = pkl.load(a_)
__a : Optional[int] = weights.pop('''model''')
__a : Dict = {}
for k, v in model.items():
__a : str = torch.from_numpy(a_)
if "running_var" in k:
__a : List[Any] = torch.tensor([0])
__a : Optional[Any] = k.replace('''running_var''' , '''num_batches_tracked''')
__a : str = zero
return new
def __A ( ) -> int:
print(F"""{os.path.abspath(os.path.join(a_ , os.pardir))}/demo.ipynb""")
def __A ( a_ :Optional[int] , a_ :List[Any]="RGB") -> str:
assert isinstance(a_ , a_)
if os.path.isfile(a_):
__a : str = cva.imread(a_)
else:
__a : Dict = get_image_from_url(a_)
assert img is not None, F"""could not connect to: {im}"""
__a : Tuple = cva.cvtColor(a_ , cva.COLOR_BGR2RGB)
if input_format == "RGB":
__a : Dict = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 711 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling=True, num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=32, decoder_ffn_dim=32, activation_function="gelu", dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, is_encoder_decoder=True, label_length=10, moving_average=25, autocorrelation_factor=3, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 101 | 0 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that a device_map assigns every attention block to exactly one device."""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def get_device_map(n_layers, devices):
    """Return a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
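# ---------------------------------------------------------------------------
# Hedged usage sketch: 12 layers over 4 devices -> ceil(12 / 4) = 3
# consecutive layers per device; assert_device_map then passes.
device_map = get_device_map(12, list(range(4)))
print(device_map)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
assert_device_map(device_map, 12)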
| 619 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCAmelCase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__UpperCAmelCase = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte to a printable unicode character so BPE can operate on text."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
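# ---------------------------------------------------------------------------
# Hedged sketch: get_pairs feeds the BPE loop in the tokenizer below; for the
# symbol tuple of "hello" it yields every adjacent pair exactly once.
print(get_pairs(("h", "e", "l", "l", "o")))
# -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} (a set, order varies)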
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked (earliest learned) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
def UpperCAmelCase ( self : Optional[int] , a_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
a__ : List[Any] = []
for token in re.findall(self.pat , a_ ):
a__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a_ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase ( self : Union[str, Any] , a_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(a_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self : Optional[Any] , a_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.decoder.get(a_ )
def UpperCAmelCase ( self : str , a_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
a__ : str = "".join(a_ )
a__ : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def UpperCAmelCase ( self : Any , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a__ : Dict = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
a__ : List[str] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(a_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a_ , ensure_ascii=a_ ) + "\n" )
a__ : Dict = 0
with open(a_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a_ : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
a__ : Optional[Any] = token_index
writer.write(" ".join(a_ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]
def UpperCAmelCase ( self : Optional[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Dict = [self.sep_token_id]
a__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase ( self : Any , a_ : Optional[Any] , a_ : Union[str, Any]=False , **a_ : Dict ) -> Dict:
'''simple docstring'''
a__ : int = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a_ ) > 0 and not text[0].isspace()):
a__ : int = " " + text
return (text, kwargs)
def UpperCAmelCase ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ) -> Optional[int]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : str , a_ : "Conversation" ) -> List[int]:
'''simple docstring'''
a__ : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(a_ )
a__ : Dict = " ".join(a_ )
a__ : List[str] = self.encode(a_ )
if len(a_ ) > self.model_max_length:
a__ : Union[str, Any] = input_ids[-self.model_max_length :]
logger.warning(F"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
| 642 | 0 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
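# Hedged check (not in the original file): the classic small case from
# Project Euler problem 3, where 13195 = 5 * 7 * 13 * 29.
assert solution(13195) == 29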
| 706 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 555 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
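# ---------------------------------------------------------------------------
# Hedged usage sketch (commented out because it downloads the MNLI checkpoint
# on first use):
# classifier = TextClassificationTool()
# print(classifier("This is a super nice API!", labels=["positive", "negative"]))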
| 79 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a : Optional[int] = logging.get_logger(__name__)
def rename_key(key):
    """Turn PyTorch-style "layer.0" segments into Flax-style "layer_0"."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
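# Hedged usage sketch (the model names below are assumptions, not part of the
# original file): given any matching PyTorch/Flax model pair, conversion is a
# single call; the randomly initialized Flax params only supply reference shapes
# and key names.
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)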
| 679 | 0 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using the union-by-rank heuristic; return True if successful."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
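if __name__ == "__main__":
    # Small usage sketch: three singleton sets; merging 0 and 1 grows the largest
    # set to size 2 and makes both elements share a representative.
    disjoint_set = DisjointSet([1, 1, 1])
    disjoint_set.merge(0, 1)
    print(disjoint_set.max_set)  # 2
    print(disjoint_set.get_parent(0) == disjoint_set.get_parent(1))  # True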
| 671 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns, for a given minimum block length, the smallest row length n for
    which the fill-count function first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F"{solution() = }")
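    # Sanity check taken from the Project Euler 115 statement: for a minimum
    # block length of 3, the fill count first exceeds one million at n = 30.
    assert solution(3) == 30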
| 671 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
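if __name__ == "__main__":
    # Hedged usage sketch: instantiate a default config and inspect the derived
    # stage layout consumed by the backbone utilities.
    config = ConvNextV2Config()
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']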
| 40 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`):
        Hidden states conditioned on the `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A Transformer model for video-like data that attends across the frame dimension."""

    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ] )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
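if __name__ == "__main__":
    # Hedged shape sketch: a batch of 2 videos with 4 frames each arrives flattened
    # into 8 "frame images"; the temporal blocks then attend across the 4 frames.
    model = TransformerTemporalModel(in_channels=32)
    frames = torch.randn(8, 32, 16, 16)  # (batch * num_frames, channels, height, width)
    out = model(frames, num_frames=4).sample
    print(out.shape)  # torch.Size([8, 32, 16, 16])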
| 74 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    """Write a list of articles to the given path, one per line."""
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
__lowerCAmelCase ="patrickvonplaten/t5-tiny-random"
__lowerCAmelCase ="sshleifer/bart-tiny-random"
__lowerCAmelCase ="sshleifer/tiny-mbart"
__lowerCAmelCase =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
        assert Path(output_file_name).exists()
        # os.remove(Path(output_file_name))

    # test one model quickly (non-@slow) to catch simple problems
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into this list - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [" num_beams | length_penalty", model, "Best score args"]
        un_expected_strings = ["Info"]
        if "translation" in task:
            expected_strings.append("bleu")
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
| 711 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns a list of per-epoch random draws."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model to do y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; only the two most recent should survive:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__lowerCAmelCase ="/tmp/accelerate/state_checkpointing"
__lowerCAmelCase =DummyModel()
__lowerCAmelCase =torch.optim.Adam(params=model.parameters(), lr=1e-3)
__lowerCAmelCase =torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__lowerCAmelCase , __lowerCAmelCase =dummy_dataloaders()
__lowerCAmelCase =ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__lowerCAmelCase =Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase =accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__lowerCAmelCase , __lowerCAmelCase =accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__lowerCAmelCase =group["params"][0].device
break
assert param_device.type == accelerator.device.type
__lowerCAmelCase =model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
__lowerCAmelCase =group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
__lowerCAmelCase =group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 405 | 0 |
import numpy as np
SQUARE = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the pair of numbers that represents the given letter in the polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter corresponding to the position [index1 - 1, index2 - 1] in the polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Return the encoded version of the message according to the polybius cipher."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        """Return the decoded version of the message according to the polybius cipher."""
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
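if __name__ == "__main__":
    # Round-trip sketch: encode then decode recovers the normalized plaintext;
    # note that spaces are dropped and 'j' is folded into 'i' before encoding.
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    print(cipher.decode(encoded))  # testmessage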
| 279 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir: str) -> dict:
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 279 | 1 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law (PV = nRT) solved for pressure, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
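    # Worked example: 2 mol of an ideal gas at 300 K in a 10 L vessel exerts
    # round((2 * 0.0821 * 300) / 10) = 5 atm, using R = 0.0821 L*atm/(mol*K).
    print(moles_to_pressure(volume=10, moles=2, temperature=300))  # 5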
| 704 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: bool = True, num_time_features: int = 0, num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, activation_function: str = "gelu", dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache: bool = True, is_encoder_decoder=True, label_length: int = 10, moving_average: int = 25, autocorrelation_factor: int = 3, **kwargs) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
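if __name__ == "__main__":
    # Hedged usage sketch: with the defaults, the seven lags plus two time features
    # and the two scale-related features give an encoder input dimension of 11.
    config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    print(config.feature_size)  # 11 with the settings above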
| 398 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    hidden_dropout: Optional[float] = field(
        default=0.1, metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        }, )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."}, )
    mask_time_prob: Optional[float] = field(
        default=0.05, metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        }, )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train+validation", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_val_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        }, )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that dynamically pads the inputs and labels received."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name)
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names, )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names, )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|", )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0])
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **__UpperCAmelCase ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__UpperCamelCase = deprecated_arg[3:]
setattr(self , __UpperCAmelCase , not kwargs.pop(__UpperCAmelCase ) )
logger.warning(
F'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}' )
__UpperCamelCase = kwargs.pop('torchscript' , self.torchscript )
__UpperCamelCase = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
__UpperCamelCase = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**__UpperCAmelCase )
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Trace the models using torchscript"} )
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
lowercase = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
__UpperCamelCase = torch.device('cpu' )
__UpperCamelCase = 0
elif is_torch_tpu_available():
__UpperCamelCase = xm.xla_device()
__UpperCamelCase = 0
else:
__UpperCamelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__UpperCamelCase = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.n_gpu > 0
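# Editor's standalone sketch of the deprecated-flag handling in __init__ above:
# a legacy `no_x` kwarg is popped and stored as the negated positive attribute
# `x`. The class and flag names here are illustrative only.
class _FlagFlipDemo:
    deprecated_args = ["no_cuda", "no_tpu"]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))


assert _FlagFlipDemo(no_cuda=True).cuda is False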
| 293 |
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10_000) -> int:
    # A number is assumed Lychrel if reverse-and-add yields no palindrome
    # within 50 iterations (Project Euler problem 55).
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        candidate = num
        while iterations < 50:
            candidate = sum_reverse(candidate)
            iterations += 1
            if is_palindrome(candidate):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
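# Editor's sanity checks: 47 reaches the palindrome 121 after one
# reverse-and-add step, while 196 (the classic Lychrel candidate) never
# produces a palindrome within the 50-iteration cutoff used by solution().
assert sum_reverse(47) == 121 and is_palindrome(121)
candidate = 196
for _ in range(50):
    candidate = sum_reverse(candidate)
    assert not is_palindrome(candidate)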
if __name__ == "__main__":
print(f'''{solution() = }''')
| 293 | 1 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    # fill_count_functions[n] counts the fillings of a row of length n
    # (Project Euler problem 115); return the least n exceeding one million.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
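# Editor's cross-check of the recurrence above by direct enumeration: count the
# ways to fill a row of n cells with black unit squares and red blocks of
# length >= m, any two red blocks separated by at least one black cell.
from functools import lru_cache


def brute_force_ways(m: int, n: int) -> int:
    @lru_cache(maxsize=None)
    def ways(i: int) -> int:  # fillings of the suffix starting at cell i
        if i >= n:
            return 1
        total = ways(i + 1)  # cell i stays black
        for length in range(m, n - i + 1):  # or a red block starts at cell i
            total += ways(i + length + 1)  # +1 skips the separating black cell
        return total

    return ways(0)


# F(3, 7) = 17 is the worked example from Project Euler problem 115
assert brute_force_ways(3, 7) == 17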
if __name__ == "__main__":
print(F'''{solution() = }''')
| 87 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Non-default handler values should be forwarded to the underlying GradScaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 52 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by default resize the smaller edge, with an upper cap on the longer one
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after resizing, mirroring the processor's rule.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
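# Editor's sketch of the aspect-preserving resize rule that get_expected_values
# mirrors: scale the short side to `shortest_edge`, clamping so the long side
# never exceeds `longest_edge`. The int() truncation matches the shapes asserted
# above, though the library's exact rounding is an assumption of this sketch.
def _expected_resize(height: int, width: int, shortest_edge: int = 800, longest_edge: int = 1333):
    short, long = min(height, width), max(height, width)
    if short * longest_edge < long * shortest_edge:  # must clamp on the long side
        target_short, target_long = int(short * longest_edge / long), longest_edge
    else:
        target_short, target_long = shortest_edge, int(long * shortest_edge / short)
    return (target_short, target_long) if height <= width else (target_long, target_short)


# a 480x640 COCO image maps to 800x1066, the shape asserted in the slow tests
assert _expected_resize(480, 640) == (800, 1066)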
| 721 |
def gnome_sort(lst: list) -> list:
    # Walk forward while ordered; on an inversion, swap and step back.
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
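# Editor's randomized cross-check against the built-in sort; gnome sort is
# O(n^2) in the worst case but O(n) on already-sorted input.
import random

for _ in range(100):
    _sample = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert gnome_sort(list(_sample)) == sorted(_sample)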
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 639 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
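# Editor's usage sketch (hedged): these dataclass fields are normally consumed
# via transformers' HfArgumentParser; the exact CLI flag behavior for booleans
# is an assumption of this sketch.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(Seq2SeqTrainingArguments)
    (training_args,) = parser.parse_args_into_dataclasses(
        args=["--output_dir", "/tmp/seq2seq_demo", "--sortish_sampler", "True", "--label_smoothing", "0.1"]
    )
    assert training_args.sortish_sampler and training_args.label_smoothing == 0.1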
| 82 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Solves a1*x + b1*y = c1 and a2*x + b2*y = c2 via Cramer's rule.
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        else:
            raise ValueError("""No solution. (Inconsistent system)""")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
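# Editor's usage sketch: the lines x + 2y = 3 and 2x + y = 3 intersect at (1, 1).
assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)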
| 208 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    print("Making key files...")
    make_key_files("rsa", 1_024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int):
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', "w") as out_file:
        out_file.write(f'{key_size},{public_key[0]},{public_key[1]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', "w") as out_file:
        out_file.write(f'{key_size},{private_key[0]},{private_key[1]}')
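# Editor's sketch of how a generated pair is used: textbook RSA on an integer
# message m < n with no padding -- fine for a demo, never for real-world use.
def rsa_roundtrip(public_key: tuple, private_key: tuple, message: int) -> bool:
    n, e = public_key
    _n, d = private_key
    ciphertext = pow(message, e, n)  # encrypt with the public key (n, e)
    return pow(ciphertext, d, n) == message  # decrypt with the private key (n, d)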
if __name__ == "__main__":
main()
| 316 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        # point update in O(log N): write the leaf, then recompute ancestors
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        # inclusive range query [l, r] in O(log N)
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        # brute-force every segment [i, j] against the three trees
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
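    # Editor's extra check: the tree works with any associative fnc, e.g. gcd.
    from math import gcd

    gcd_segment_tree = SegmentTree([12, 18, 30, 7, 14], gcd)
    assert gcd_segment_tree.query(0, 2) == 6  # gcd(12, 18, 30)
    assert gcd_segment_tree.query(3, 4) == 7  # gcd(7, 14)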
| 316 | 1 |
'''simple docstring'''
def __UpperCamelCase ( lowercase__ : list[int] ):
'''simple docstring'''
__lowercase =[]
if len(lowercase__ ) == 1:
return [nums.copy()]
for _ in range(len(lowercase__ ) ):
__lowercase =nums.pop(0 )
__lowercase =permute(lowercase__ )
for perm in permutations:
perm.append(lowercase__ )
result.extend(lowercase__ )
nums.append(lowercase__ )
return result
def permute2(nums: list[int]) -> list[list[int]]:
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output
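# Editor's cross-check: both implementations agree with itertools.permutations.
from itertools import permutations as _it_permutations

_expected = sorted(list(p) for p in _it_permutations([1, 2, 3]))
assert sorted(permute([1, 2, 3])) == _expected
assert sorted(permute2([1, 2, 3])) == _expected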
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
UpperCAmelCase = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 119 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    # Count perimeters <= limit formed by exactly one right triangle
    # (Project Euler problem 75), via Euclid's primitive-triple formula.
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
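# Editor's reference for one Euclid step: m = 2, n = 1 yields the primitive
# triple (m^2 - n^2, 2mn, m^2 + n^2) = (3, 4, 5) with perimeter 2m(m + n) = 12.
def euclid_triple(m: int, n: int) -> tuple[int, int, int]:
    return (m * m - n * n, 2 * m * n, m * m + n * n)


assert euclid_triple(2, 1) == (3, 4, 5)
assert sum(euclid_triple(2, 1)) == 2 * 2 * (2 + 1)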
if __name__ == "__main__":
print(F'''{solution() = }''')
| 119 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 184 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
| 184 | 1 |
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.')
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f'{error_message} of its length.')
        return False

    if not validate_initial_digits(credit_card_number):
        print(f'{error_message} of its first two digits.')
        return False

    if not luhn_validation(credit_card_number):
        print(f'{error_message} it fails the Luhn check.')
        return False

    print(f'{credit_card_number} is a valid credit card number.')
    return True
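# Editor's worked example on the valid demo number 4111111111111111: from the
# right, every second digit doubles (seven 1s -> 2 each, the leading 4 -> 8),
# the other eight 1s stay, so total = 7 * 2 + 8 + 8 = 30 and 30 % 10 == 0.
assert luhn_validation("4111111111111111")
assert not luhn_validation("4111111111111112")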
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 602 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 602 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
    'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
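# Editor's instantiation sketch: defaults above mirror google/fnet-base, and
# overrides flow through PretrainedConfig's normal serialization machinery.
if __name__ == "__main__":
    config = FNetConfig(num_hidden_layers=2)
    assert config.model_type == "fnet" and config.num_hidden_layers == 2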
| 473 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-vision-base-ft': (
        'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
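# Editor's sketch (hedged): the export config above declares a single
# pixel_values input. Instantiating it against a default model config -- the
# constructor signature is assumed from the OnnxConfig base class -- exposes
# the declared axes and validation tolerance.
if __name__ == "__main__":
    onnx_config = Data2VecVisionOnnxConfig(Data2VecVisionConfig())
    assert "pixel_values" in onnx_config.inputs
    assert onnx_config.atol_for_validation == 1e-4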
| 473 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.dummy_cond_unet
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
_UpperCAmelCase = self.dummy_vae
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_UpperCAmelCase = 77
_UpperCAmelCase = self.dummy_image.to(UpperCamelCase_ )
# put models in fp16
_UpperCAmelCase = unet.half()
_UpperCAmelCase = vae.half()
_UpperCAmelCase = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
_UpperCAmelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase_ )
_UpperCAmelCase = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = alt_pipe(
[prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="np" , image=UpperCamelCase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCAmelCase = init_image.resize((760, 504) )
_UpperCAmelCase = 'BAAI/AltDiffusion'
_UpperCAmelCase = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = 'A fantasy landscape, trending on artstation'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase_ , output_type="np" , )
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_UpperCAmelCase = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 684 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    # Number of distinct ways to climb a staircase taking 1 or 2 steps at a time.
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
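    # Editor's sanity check: climb_stairs(n) follows the Fibonacci recurrence,
    # since step n is reached from step n-1 (one hop) or step n-2 (two hops).
    assert [climb_stairs(n) for n in range(1, 7)] == [1, 2, 3, 5, 8, 13]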
| 209 | 0 |
"""simple docstring"""
import requests
giphy_api_key = '''YOUR API KEY'''


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs from the Giphy search API for the given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
    print('''\n'''.join(get_gifs('''space ship''')))
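# --- Illustrative hardening sketch (added; `get_gifs_safe` is a hypothetical variant).
# The call above assumes a well-formed response; a defensive version might add a request
# timeout and tolerate a missing "data" key:
def get_gifs_safe(query: str, api_key: str = giphy_api_key) -> list:
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    payload = requests.get(url, timeout=10).json()
    return [gif["url"] for gif in payload.get("data", [])]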
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Assigning `.data` replaces the weight tensor wholesale, so the resulting layer
    # maps emb_size -> vocab_size, tied to the embedding matrix.
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
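# --- Illustrative check (added; the helper name is hypothetical). make_linear_from_emb
# ties the LM head to the shared embedding, so projecting a hidden state is just a
# matmul against the embedding matrix. A tiny self-contained sketch with random weights:
def _demo_tied_lm_head():
    emb = nn.Embedding(10, 4)
    lm_head = make_linear_from_emb(emb)
    hidden = torch.randn(1, 4)
    assert torch.allclose(lm_head(hidden), hidden @ emb.weight.t())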
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : List[str] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def _SCREAMING_SNAKE_CASE ( __snake_case : List[Any] , __snake_case : Optional[Any] ):
inspect_dataset(__snake_case , __snake_case )
_A = path + '.py'
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : Dict ):
inspect_metric(__snake_case , __snake_case )
_A = path + '.py'
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : Any , __snake_case : Union[str, Any] ):
_A = get_dataset_config_info(__snake_case , config_name=__snake_case )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[str] ):
with pytest.raises(__snake_case ):
get_dataset_config_info(__snake_case , config_name=__snake_case )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : Optional[Any] ):
_A = get_dataset_config_names(__snake_case )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Optional[Any] ):
_A = get_dataset_infos(__snake_case )
assert list(infos.keys() ) == expected_configs
_A = expected_configs[0]
assert expected_config in infos
_A = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : str , __snake_case : Optional[int] , __snake_case : List[Any] ):
_A = get_dataset_infos(__snake_case )
assert expected_config in infos
_A = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Any ):
with pytest.raises(__snake_case ):
get_dataset_split_names(__snake_case , config_name=__snake_case )
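# --- Illustrative usage (added): the inspection helpers under test can be called
# directly outside pytest; network access to the Hub is assumed.
if __name__ == "__main__":
    print(get_dataset_config_names("squad"))  # e.g. ['plain_text']
    print(get_dataset_split_names("squad", "plain_text"))  # ['train', 'validation']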
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> int:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase : List[str] = (
F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
lowerCamelCase : int = dict(scheduler.config )
lowerCamelCase : int = 1
lowerCamelCase : str = FrozenDict(UpperCamelCase__ )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase : Optional[int] = (
F'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
lowerCamelCase : int = dict(scheduler.config )
lowerCamelCase : Tuple = True
lowerCamelCase : List[str] = FrozenDict(UpperCamelCase__ )
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=UpperCamelCase__ , segmentation_processor=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , )
def _lowercase ( self , UpperCamelCase__ = "auto" ) -> Any:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def _lowercase ( self ) -> Tuple:
self.enable_attention_slicing(UpperCamelCase__ )
def _lowercase ( self ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase : int = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self ) -> List[Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 50 , UpperCamelCase__ = 7.5 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 , **UpperCamelCase__ , ) -> List[Any]:
lowerCamelCase : str = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase : Union[str, Any] = self.segmentation_model(**UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase : Tuple = self.numpy_to_pil(UpperCamelCase__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase : Any = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , )
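# --- Illustrative usage sketch (added; not part of the original pipeline file).
# Upstream, a pipeline of this shape is distributed as a diffusers community pipeline
# ("text_inpainting"); loading it is assumed to look roughly like the following, with
# the checkpoint ids taken from the community docs rather than from this file:
#
#     from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-inpainting",
#         custom_pipeline="text_inpainting",
#         segmentation_model=model,
#         segmentation_processor=processor,
#     )
#     image = pipe(text="a glass", image=init_image, prompt="a cup of coffee").images[0]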
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : int=1_3 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : List[str]=3_2 , UpperCamelCase__ : Optional[Any]=5 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : Optional[int]=3_7 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Optional[Any]=1_6 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : int=None , ):
'''simple docstring'''
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_token_type_ids
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = num_labels
snake_case__ = num_choices
snake_case__ = scope
snake_case__ = self.vocab_size - 1
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
snake_case__ = None
if self.use_token_type_ids:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
snake_case__ = None
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
snake_case__ = ids_tensor([self.batch_size] , self.num_choices)
snake_case__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
snake_case__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __magic_name__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , *UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
snake_case__ = OpenAIGPTModel(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
snake_case__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , head_mask=UpperCamelCase__)
snake_case__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__)
snake_case__ = model(UpperCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , *UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
snake_case__ = OpenAIGPTLMHeadModel(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
snake_case__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , *UpperCamelCase__ : Any):
'''simple docstring'''
snake_case__ = OpenAIGPTDoubleHeadsModel(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
snake_case__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __magic_name__ ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , *UpperCamelCase__ : List[Any]):
'''simple docstring'''
snake_case__ = self.num_labels
snake_case__ = OpenAIGPTForSequenceClassification(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
snake_case__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) = config_and_inputs
snake_case__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
_lowercase : Union[str, Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_lowercase : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_lowercase : str = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __magic_name__ ( self : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int=False):
'''simple docstring'''
snake_case__ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__)
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
snake_case__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ , )
snake_case__ = inputs_dict["""labels"""]
snake_case__ = inputs_dict["""labels"""]
snake_case__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCamelCase__ , )
snake_case__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__)
return inputs_dict
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
snake_case__ = OpenAIGPTModelTester(self)
snake_case__ = ConfigTester(self , config_class=UpperCamelCase__ , n_embd=3_7)
def __magic_name__ ( self : int):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : Dict):
'''simple docstring'''
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase__)
def __magic_name__ ( self : Any):
'''simple docstring'''
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase__)
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase__)
def __magic_name__ ( self : Dict):
'''simple docstring'''
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase__)
@slow
def __magic_name__ ( self : str):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = OpenAIGPTModel.from_pretrained(UpperCamelCase__)
self.assertIsNotNone(UpperCamelCase__)
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""")
model.to(UpperCamelCase__)
snake_case__ = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=UpperCamelCase__) # the president is
snake_case__ = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
snake_case__ = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__)
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase__)
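# --- Illustrative usage (added): the integration test above in plain form. The
# checkpoint download is assumed to succeed; greedy decoding reproduces the
# expected continuation of "the president is".
if __name__ == "__main__":
    from transformers import OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    inputs = tokenizer("the president is", return_tensors="pt")
    output_ids = model.generate(inputs.input_ids, do_sample=False, max_length=20)
    print(tokenizer.decode(output_ids[0]))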
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the prime numbers in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
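# --- Illustrative sanity check (added): with the helpers above, the primes below 10
# are 2, 3, 5 and 7, so solution(10) must equal 17.
if __name__ == "__main__":
    assert solution(10) == 17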
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _UpperCAmelCase ( A , A ):
'''simple docstring'''
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _UpperCAmelCase ( A , A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase__ =SqlDatasetReader(
"dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_sql_dataset(lowerCamelCase__ , lowerCamelCase__ )
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _UpperCAmelCase ( A , A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCAmelCase__ =features.copy() if features else default_expected_features
UpperCAmelCase__ =(
Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase__ =SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_sql_dataset(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( A ):
'''simple docstring'''
    with contextlib.closing(sqlite3.connect(lowerCamelCase__ ) ) as con:
UpperCAmelCase__ =con.cursor()
cur.execute("SELECT * FROM dataset" )
for row in cur:
yield row
@require_sqlalchemy
def _UpperCAmelCase ( A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ =os.path.join(lowerCamelCase__ , "tmp.sql" )
UpperCAmelCase__ =SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase__ ).read()
SqlDatasetWriter(lowerCamelCase__ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write()
UpperCAmelCase__ =iter_sql_file(lowerCamelCase__ )
UpperCAmelCase__ =iter_sql_file(lowerCamelCase__ )
for rowa, rowa in zip(lowerCamelCase__ , lowerCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCAmelCase ( A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ =os.path.join(lowerCamelCase__ , "tmp.sql" )
UpperCAmelCase__ =SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase__ ).read()
SqlDatasetWriter(lowerCamelCase__ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write()
UpperCAmelCase__ =iter_sql_file(lowerCamelCase__ )
UpperCAmelCase__ =iter_sql_file(lowerCamelCase__ )
for rowa, rowa in zip(lowerCamelCase__ , lowerCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCAmelCase ( A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ =os.path.join(lowerCamelCase__ , "tmp.sql" )
UpperCAmelCase__ =SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase__ ).read()
with pytest.raises(lowerCamelCase__ ):
SqlDatasetWriter(lowerCamelCase__ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
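# --- Illustrative round-trip sketch (added; the helper name and paths are
# hypothetical). The same reader/writer pair exercised by the tests above, used
# directly outside pytest:
def _example_sql_roundtrip(tmp_dir: str) -> Dataset:
    src = "sqlite:///" + os.path.join(tmp_dir, "in.sqlite")
    dst = "sqlite:///" + os.path.join(tmp_dir, "out.sqlite")
    ds = SqlDatasetReader("dataset", src, cache_dir=tmp_dir).read()
    SqlDatasetWriter(ds, "dataset", dst, num_proc=1).write()
    return ds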
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""")

    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = """background"""
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilenet_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size},
        size={"""shortest_edge""": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 10_01)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("""Pushing to the hub...""")
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_mobilenet_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Sort by repeatedly extracting the current minimum and maximum of the list."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(*merge_sort(unsorted), sep=''',''')
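# --- Illustrative property check (added): the min/max extraction above must agree
# with the built-in sort for any input, including duplicates and empty lists.
if __name__ == "__main__":
    import random

    for _ in range(100):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert merge_sort(list(data)) == sorted(data)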
'''simple docstring'''
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
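# --- Illustrative note (added): with only `math.inf` initialisation, dp[i][i] stays
# infinite unless a cycle passes through i; a quick way to inspect the result is to
# print the whole distance matrix.
if __name__ == "__main__":
    for row in graph.dp:
        print(["inf" if d == math.inf else d for d in row])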
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
A_: List[str] = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
A_: List[Any] = logging.get_logger(__name__)
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'maskformer'
lowerCAmelCase__ = {'hidden_size': 'mask_feature_size'}
lowerCAmelCase__ = ['resnet', 'swin']
lowerCAmelCase__ = ['detr']
def __init__( self , UpperCAmelCase = 256 , UpperCAmelCase = 256 , UpperCAmelCase = 0.1 , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 0.02 , UpperCAmelCase = 1.0 , UpperCAmelCase = 1.0 , UpperCAmelCase = 1.0 , UpperCAmelCase = 20.0 , UpperCAmelCase = None , **UpperCAmelCase , ):
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_lowercase = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
_lowercase = backbone_config.pop("""model_type""" )
_lowercase = CONFIG_MAPPING[backbone_model_type]
_lowercase = config_class.from_dict(UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
F'''Supported model types: {",".join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_lowercase = DetrConfig()
else:
# verify that the decoder is supported
_lowercase = (
decoder_config.pop("""model_type""" ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F'''Transformer Decoder {decoder_type} not supported, please use one of'''
F''' {",".join(self.decoders_supported )}''' )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
_lowercase = CONFIG_MAPPING[decoder_type]
_lowercase = config_class.from_dict(UpperCAmelCase )
_lowercase = backbone_config
_lowercase = decoder_config
# main feature dimension for the model
_lowercase = fpn_feature_size
_lowercase = mask_feature_size
# initializer
_lowercase = init_std
_lowercase = init_xavier_std
# Hungarian matcher && loss
_lowercase = cross_entropy_weight
_lowercase = dice_weight
_lowercase = mask_weight
_lowercase = use_auxiliary_loss
_lowercase = no_object_weight
_lowercase = output_auxiliary_logits
_lowercase = self.decoder_config.encoder_attention_heads
_lowercase = self.decoder_config.num_hidden_layers
super().__init__(**UpperCAmelCase )
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
return cls(
backbone_config=UpperCAmelCase , decoder_config=UpperCAmelCase , **UpperCAmelCase , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = copy.deepcopy(self.__dict__ )
_lowercase = self.backbone_config.to_dict()
_lowercase = self.decoder_config.to_dict()
_lowercase = self.__class__.model_type
return output
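# --- Illustrative note (added): the first classmethod above mirrors the upstream
# `MaskFormerConfig.from_backbone_and_decoder_configs`, composing a config from an
# explicit backbone and decoder config (upstream names assumed; defaults elsewhere):
#
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )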
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __lowerCAmelCase ( _A ):
"""simple docstring"""
return DownloadCommand(args.model ,args.cache_dir ,args.force ,args.trust_remote_code )
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def _UpperCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
_lowercase = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=UpperCAmelCase , default=UpperCAmelCase , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=UpperCAmelCase , help="""Name of the model to download""" )
download_parser.set_defaults(func=UpperCAmelCase )
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
_lowercase = model
_lowercase = cache
_lowercase = force
_lowercase = trust_remote_code
def _UpperCAmelCase ( self ):
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every vector in value_array, find its nearest neighbour in dataset."""
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape')

    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
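# --- Illustrative example (added): a 2-D dataset and a single query vector; the
# nearest neighbour of (0, 1) is (0, 0) at Euclidean distance 1.
if __name__ == "__main__":
    _dataset = np.array([[0, 0], [1, 1], [2, 2]])
    _value_array = np.array([[0, 1]])
    assert similarity_search(_dataset, _value_array) == [[[0, 0], 1.0]]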
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Fractional knapsack: maximise profit for the given capacity."""
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the full item:
            # weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        'Input profits, weights, and then max_weight (all positive ints) separated by '
        'spaces.'
    )

    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))

    # Function Call
    calc_profit(profit, weight, max_weight)
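# --- Illustrative worked example (added): items with profits [1, 2, 3] and weights
# [3, 4, 5] under max_weight 15 all fit (total weight 12), so the gain is 1+2+3 = 6:
#
#     >>> calc_profit([1, 2, 3], [3, 4, 5], 15)
#     6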
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Decorator that fires the accelerate pre-forward hook (if any) before `method` runs."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
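# --- Illustrative check (added; stub classes are hypothetical). A minimal object with
# a `_hf_hook` shows the wrapper firing the pre-forward hook before the wrapped method.
# With accelerate missing or older than 0.17.0 the decorator is a no-op, hence the
# two accepted call orders below.
if __name__ == "__main__":
    calls = []

    class _Hook:
        def pre_forward(self, module):
            calls.append("pre_forward")

    class _Module:
        _hf_hook = _Hook()

        @apply_forward_hook
        def forward(self):
            calls.append("forward")

    _Module().forward()
    assert calls in (["pre_forward", "forward"], ["forward"])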
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (shortest remaining time first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 99_99_99_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 99_99_99_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''')
    print("""Average turn around time =""", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("""Enter how many process you want to analyze""")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            """Process""",
            """BurstTime""",
            """ArrivalTime""",
            """WaitingTime""",
            """TurnAroundTime""",
        ],
    )

    # Printing the dataFrame
    pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
    print(fcfs)
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class a ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = 1
UpperCAmelCase = 2
@add_end_docstrings(_lowerCamelCase )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self: str , *UpperCamelCase: Dict , **UpperCamelCase: Any ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
A__ = None
if self.model.config.prefix is not None:
A__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
A__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
A__ , A__ , A__ = self._sanitize_parameters(prefix=UpperCamelCase , **self._forward_params )
A__ = {**self._preprocess_params, **preprocess_params}
A__ = {**self._forward_params, **forward_params}
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: int=None , UpperCamelCase: Any=None , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[Any]=None , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[int]=None , UpperCamelCase: str=None , UpperCamelCase: Any=None , **UpperCamelCase: Optional[Any] , ):
"""simple docstring"""
A__ = {}
if prefix is not None:
A__ = prefix
if prefix:
A__ = self.tokenizer(
UpperCamelCase , padding=UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=self.framework )
A__ = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
""" [None, 'hole']""" )
A__ = handle_long_generation
preprocess_params.update(UpperCamelCase )
A__ = generate_kwargs
A__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
A__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
A__ = ReturnType.TENSORS
if return_type is not None:
A__ = return_type
if clean_up_tokenization_spaces is not None:
A__ = clean_up_tokenization_spaces
if stop_sequence is not None:
A__ = self.tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
if len(UpperCamelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
A__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase ( self: str , *UpperCamelCase: List[str] , **UpperCamelCase: Dict ):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*UpperCamelCase , **UpperCamelCase )
def __call__( self: str , UpperCamelCase: Optional[int] , **UpperCamelCase: Tuple ):
"""simple docstring"""
return super().__call__(UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any="" , UpperCamelCase: Any=None , **UpperCamelCase: Any ):
"""simple docstring"""
A__ = self.tokenizer(
prefix + prompt_text , padding=UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=self.framework )
A__ = prompt_text
if handle_long_generation == "hole":
A__ = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
A__ = generate_kwargs["""max_new_tokens"""]
else:
A__ = generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
A__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""" )
A__ = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
A__ = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def UpperCamelCase ( self: Dict , UpperCamelCase: Dict , **UpperCamelCase: int ):
"""simple docstring"""
A__ = model_inputs["""input_ids"""]
A__ = model_inputs.get("""attention_mask""" , UpperCamelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
A__ = None
A__ = None
A__ = 1
else:
A__ = input_ids.shape[0]
A__ = model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
A__ = generate_kwargs.pop("""prefix_length""" , 0 )
if prefix_length > 0:
A__ = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
A__ = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
A__ = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
A__ = self.model.generate(input_ids=UpperCamelCase , attention_mask=UpperCamelCase , **UpperCamelCase )
A__ = generated_sequence.shape[0]
if self.framework == "pt":
A__ = generated_sequence.reshape(UpperCamelCase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
A__ = tf.reshape(UpperCamelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: int=ReturnType.FULL_TEXT , UpperCamelCase: Any=True ):
"""simple docstring"""
A__ = model_outputs["""generated_sequence"""][0]
A__ = model_outputs["""input_ids"""]
A__ = model_outputs["""prompt_text"""]
A__ = generated_sequence.numpy().tolist()
A__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
A__ = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
A__ = self.tokenizer.decode(
UpperCamelCase , skip_special_tokens=UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
A__ = 0
else:
A__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase , ) )
if return_type == ReturnType.FULL_TEXT:
A__ = prompt_text + text[prompt_length:]
else:
A__ = text[prompt_length:]
A__ = {"""generated_text""": all_text}
records.append(UpperCamelCase )
return records
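# --- Illustrative usage (added): the class above backs the "text-generation"
# pipeline task; a typical call looks like the following (model download assumed):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])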
"""simple docstring"""
def longest_distance(graph: dict) -> None:
    """Print the number of vertices on the longest path in a DAG (topological DP)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
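# For the adjacency list above this prints 5, e.g. the chain 0 -> 2 -> 5 -> 6 -> 7
# (long_dist counts vertices on the path, not edges).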
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE__ : Dict = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
        'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
    },
}
SCREAMING_SNAKE_CASE__ : str = {
'facebook/mbart-large-en-ro': 1_024,
'facebook/mbart-large-cc25': 1_024,
}
# fmt: off
SCREAMING_SNAKE_CASE__ : Dict = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class _SCREAMING_SNAKE_CASE ( A ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=None , A_=None , A_=None , **A_ , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase : Dict = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
vocab_file=A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , src_lang=A_ , tgt_lang=A_ , additional_special_tokens=A_ , **A_ , )
_UpperCAmelCase : Any = vocab_file
_UpperCAmelCase : Tuple = False if not self.vocab_file else True
_UpperCAmelCase : Any = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_UpperCAmelCase : Optional[Any] = {
lang_code: self.convert_tokens_to_ids(A_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_UpperCAmelCase : Optional[Any] = src_lang if src_lang is not None else """en_XX"""
_UpperCAmelCase : List[str] = self.convert_tokens_to_ids(self._src_lang )
_UpperCAmelCase : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __snake_case( self ):
return self._src_lang
@src_lang.setter
def __snake_case( self , A_ ):
_UpperCAmelCase : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __snake_case( self , A_ , A_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __snake_case( self , A_ , A_ = None ):
_UpperCAmelCase : List[Any] = [self.sep_token_id]
_UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case( self , A_ , A_ , A_ , A_ , **A_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_UpperCAmelCase : Dict = src_lang
_UpperCAmelCase : Dict = self(A_ , add_special_tokens=A_ , return_tensors=A_ , **A_ )
_UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(A_ )
_UpperCAmelCase : str = tgt_lang_id
return inputs
def __snake_case( self , A_ , A_ = "en_XX" , A_ = None , A_ = "ro_RO" , **A_ , ):
_UpperCAmelCase : int = src_lang
_UpperCAmelCase : int = tgt_lang
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def __snake_case( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __snake_case( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __snake_case( self , A_ ):
_UpperCAmelCase : int = self.convert_tokens_to_ids(A_ )
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Any = [self.eos_token_id, self.cur_lang_code]
_UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __snake_case( self , A_ ):
_UpperCAmelCase : Optional[int] = self.convert_tokens_to_ids(A_ )
_UpperCAmelCase : str = []
_UpperCAmelCase : Any = [self.eos_token_id, self.cur_lang_code]
_UpperCAmelCase : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCAmelCase : int = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __snake_case( self , A_ , A_ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
_UpperCAmelCase : int = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
| 643 |
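# Editor's example — a hedged usage sketch for the fast MBart tokenizer above. src_lang/tgt_lang
# select the language-code suffix tokens installed by set_src_lang_special_tokens(); the sentence
# is illustrative, the checkpoint name comes from the vocab map above.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# For MBart the source side is suffixed with </s> followed by the source language code.
print(tok.convert_ids_to_tokens(batch["input_ids"][0].tolist()[-2:]))  # ['</s>', 'en_XX']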
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=A )
class _SCREAMING_SNAKE_CASE ( A ):
__SCREAMING_SNAKE_CASE = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__SCREAMING_SNAKE_CASE = Features({'''image''': Image()} )
__SCREAMING_SNAKE_CASE = Features({'''labels''': ClassLabel} )
__SCREAMING_SNAKE_CASE = "image"
__SCREAMING_SNAKE_CASE = "labels"
def __snake_case( self , A_ ):
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , A_ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
_UpperCAmelCase : Tuple = copy.deepcopy(self )
_UpperCAmelCase : str = self.label_schema.copy()
_UpperCAmelCase : Optional[Any] = features[self.label_column]
_UpperCAmelCase : int = label_schema
return task_template
@property
def __snake_case( self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 643 | 1 |
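# Editor's example — a self-contained sketch of what the alignment method above does: deep-copy
# the template's label schema, then swap in the dataset's concrete ClassLabel. The label names
# here are illustrative assumptions.
from copy import deepcopy

from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task_schema = deepcopy({"labels": None})  # stand-in for the template's generic label slot
task_schema["labels"] = features["labels"]
print(task_schema["labels"].names)  # ['cat', 'dog']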
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __magic_name__ ( __UpperCAmelCase):
'''simple docstring'''
def __init__( self: List[Any] , *_lowerCamelCase: Tuple , _lowerCamelCase: Optional[Any]=None , _lowerCamelCase: Dict=None , **_lowerCamelCase: Dict ):
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = eval_examples
SCREAMING_SNAKE_CASE_ = post_process_function
def _A ( self: Tuple , _lowerCamelCase: Optional[Dataset] = None , _lowerCamelCase: List[str]=None , _lowerCamelCase: Optional[List[str]] = None , _lowerCamelCase: str = "eval" , **_lowerCamelCase: Union[str, Any] , ):
SCREAMING_SNAKE_CASE_ = gen_kwargs.copy()
SCREAMING_SNAKE_CASE_ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
SCREAMING_SNAKE_CASE_ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
SCREAMING_SNAKE_CASE_ = gen_kwargs
SCREAMING_SNAKE_CASE_ = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE_ = self.get_eval_dataloader(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ = self.compute_metrics
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = time.time()
SCREAMING_SNAKE_CASE_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ = eval_loop(
_lowerCamelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , )
finally:
SCREAMING_SNAKE_CASE_ = compute_metrics
SCREAMING_SNAKE_CASE_ = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
_lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
SCREAMING_SNAKE_CASE_ = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE_ = self.compute_metrics(_lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
SCREAMING_SNAKE_CASE_ = metrics.pop(_lowerCamelCase )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowerCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCamelCase )
return metrics
def _A ( self: List[str] , _lowerCamelCase: int , _lowerCamelCase: List[str] , _lowerCamelCase: Optional[int]=None , _lowerCamelCase: str = "test" , **_lowerCamelCase: str ):
SCREAMING_SNAKE_CASE_ = gen_kwargs.copy()
SCREAMING_SNAKE_CASE_ = self.get_test_dataloader(_lowerCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ = self.compute_metrics
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = time.time()
SCREAMING_SNAKE_CASE_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ = eval_loop(
_lowerCamelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , )
finally:
SCREAMING_SNAKE_CASE_ = compute_metrics
SCREAMING_SNAKE_CASE_ = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
_lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE_ = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , '''predict''' )
SCREAMING_SNAKE_CASE_ = self.compute_metrics(_lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
SCREAMING_SNAKE_CASE_ = metrics.pop(_lowerCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCamelCase )
| 718 |
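# Editor's example — a runnable, stand-alone sketch of the kwargs-fallback pattern evaluate()
# uses above: an explicit generation kwarg wins, otherwise the training-args default applies.
# The function and argument names are illustrative, not the Trainer API itself.
def resolve_gen_kwargs(gen_kwargs: dict, generation_max_length: int, generation_num_beams: int) -> dict:
    resolved = gen_kwargs.copy()
    resolved["max_length"] = (
        resolved["max_length"] if resolved.get("max_length") is not None else generation_max_length
    )
    resolved["num_beams"] = (
        resolved["num_beams"] if resolved.get("num_beams") is not None else generation_num_beams
    )
    return resolved


assert resolve_gen_kwargs({"num_beams": 4}, 128, 1) == {"num_beams": 4, "max_length": 128}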
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ShapEImgaImgPipeline
SCREAMING_SNAKE_CASE__ : Dict = ["image"]
SCREAMING_SNAKE_CASE__ : List[Any] = ["image"]
SCREAMING_SNAKE_CASE__ : List[Any] = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE__ : Optional[int] = False
@property
def _A ( self: Optional[Any] ):
return 32
@property
def _A ( self: Optional[int] ):
return 32
@property
def _A ( self: List[Any] ):
return self.time_input_dim * 4
@property
def _A ( self: Any ):
return 8
@property
def _A ( self: int ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE_ = CLIPVisionModel(_lowerCamelCase )
return model
@property
def _A ( self: List[Any] ):
SCREAMING_SNAKE_CASE_ = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_lowerCamelCase , do_normalize=_lowerCamelCase , do_resize=_lowerCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_24 , )
return image_processor
@property
def _A ( self: Dict ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE_ = PriorTransformer(**_lowerCamelCase )
return model
@property
def _A ( self: List[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE_ = ShapERenderer(**_lowerCamelCase )
return model
def _A ( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.dummy_prior
SCREAMING_SNAKE_CASE_ = self.dummy_image_encoder
SCREAMING_SNAKE_CASE_ = self.dummy_image_processor
SCREAMING_SNAKE_CASE_ = self.dummy_renderer
SCREAMING_SNAKE_CASE_ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=_lowerCamelCase , clip_sample=_lowerCamelCase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE_ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def _A ( self: Optional[Any] , _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[Any]=0 ):
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def _A ( self: Any ):
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = output.images[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self: str ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A ( self: Tuple ):
SCREAMING_SNAKE_CASE_ = torch_device == '''cpu'''
SCREAMING_SNAKE_CASE_ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCamelCase , relax_max_difference=_lowerCamelCase , )
def _A ( self: Tuple ):
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE_ = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE_ = pipe(**_lowerCamelCase , num_images_per_prompt=_lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
'''simple docstring'''
def _A ( self: Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self: Optional[int] ):
        SCREAMING_SNAKE_CASE_ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
        SCREAMING_SNAKE_CASE_ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE_ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
_lowerCamelCase , generator=_lowerCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
| 89 | 0 |
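# Editor's example — an end-to-end sketch mirroring the slow test above; it needs a GPU and
# network access, so treat it as illustrative. The class is diffusers' Shap-E image-to-image
# pipeline; the argument values are copied from the assertions in the test.
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
)
generator = torch.Generator(device="cuda").manual_seed(0)
frames = pipe(
    image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np"
).images[0]
print(frames.shape)  # (20, 64, 64, 3), matching the assertion above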
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    # a twin prime pair is (number, number + 2), both prime
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()
 | 516 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
 | 516 | 1 |
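# Editor's example — a runnable sketch of the optional-dependency guard pattern above: attempt
# the gated import, and expose dummy placeholders that fail loudly when the requirement is
# missing. Module and class names are illustrative.
try:
    import nonexistent_heavy_dependency  # noqa: F401  (stands in for the transformers/torch gate)

    DEPENDENCY_AVAILABLE = True
except ImportError:
    DEPENDENCY_AVAILABLE = False

    class VersatilePipelinePlaceholder:  # mirrors the dummy objects imported on failure
        def __init__(self, *args, **kwargs):
            raise ImportError("install the optional dependency to use this pipeline")

print(DEPENDENCY_AVAILABLE)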
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase ( _lowercase ):
def __init__(self : Tuple , *A__ : Union[str, Any] , **A__ : Optional[Any] ) -> None:
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , A__ , )
super().__init__(*A__ , **A__ )
| 459 |
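# Editor's example — the deprecation-shim pattern above, reduced to a runnable toy: the old
# class subclasses the new one and only adds a warning at construction. Names are illustrative.
import warnings


class NewImageProcessor:
    def __init__(self, size: int = 224):
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)


OldFeatureExtractor(size=128)  # emits a FutureWarning, then behaves like NewImageProcessor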
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Optional[Any] = '''MCTCTFeatureExtractor'''
UpperCAmelCase : Tuple = '''AutoTokenizer'''
def __init__(self : int , A__ : Tuple , A__ : Union[str, Any] ) -> Dict:
super().__init__(A__ , A__ )
lowercase = self.feature_extractor
lowercase = False
def __call__(self : Tuple , *A__ : str , **A__ : Dict ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A__ , **A__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowercase = kwargs.pop("raw_speech" )
else:
lowercase = kwargs.pop("audio" , A__ )
lowercase = kwargs.pop("sampling_rate" , A__ )
lowercase = kwargs.pop("text" , A__ )
if len(A__ ) > 0:
lowercase = args[0]
lowercase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowercase = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
if text is not None:
lowercase = self.tokenizer(A__ , **A__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase = encodings["input_ids"]
return inputs
def UpperCAmelCase__ (self : Tuple , *A__ : str , **A__ : str ) -> str:
return self.tokenizer.batch_decode(*A__ , **A__ )
def UpperCAmelCase__ (self : Any , *A__ : List[Any] , **A__ : List[str] ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A__ , **A__ )
lowercase = kwargs.pop("input_features" , A__ )
lowercase = kwargs.pop("labels" , A__ )
if len(A__ ) > 0:
lowercase = args[0]
lowercase = args[1:]
if input_features is not None:
lowercase = self.feature_extractor.pad(A__ , *A__ , **A__ )
if labels is not None:
lowercase = self.tokenizer.pad(A__ , **A__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowercase = labels["input_ids"]
return input_features
def UpperCAmelCase__ (self : Tuple , *A__ : Optional[int] , **A__ : Optional[int] ) -> Tuple:
return self.tokenizer.decode(*A__ , **A__ )
@contextmanager
def UpperCAmelCase__ (self : Optional[Any] ) -> Union[str, Any]:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowercase = True
lowercase = self.tokenizer
yield
lowercase = self.feature_extractor
lowercase = False
| 459 | 1 |
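# Editor's example — the dispatch logic of the processor's __call__ above, as a runnable toy:
# audio routes to the feature extractor, text to the tokenizer, and when both are given the
# text ids are attached as labels. The stand-in encoders are illustrative.
def process(audio=None, text=None):
    if audio is None and text is None:
        raise ValueError("You need to specify either an `audio` or `text` input to process.")
    inputs = {"input_features": [len(audio)]} if audio is not None else None
    encodings = {"input_ids": [ord(c) for c in text]} if text is not None else None
    if text is None:
        return inputs
    if audio is None:
        return encodings
    inputs["labels"] = encodings["input_ids"]
    return inputs


print(process(audio=[0.1, 0.2], text="hi"))  # {'input_features': [2], 'labels': [104, 105]}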
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase = 16
UpperCamelCase = 32
def A ( lowercase__ : Accelerator , lowercase__ : int = 16 ) -> Dict:
UpperCamelCase__ :Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCamelCase__ :Any = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : str ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :Dict = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase__ :Union[str, Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase__ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase__ :List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase__ :int = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase__ :List[Any] = 8
else:
UpperCamelCase__ :Any = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
UpperCamelCase__ :Tuple = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
UpperCamelCase__ :List[str] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase = mocked_dataloaders # noqa: F811
def A ( lowercase__ : str , lowercase__ : Optional[int] ) -> Tuple:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
UpperCamelCase__ :List[str] = 2
# Initialize accelerator
UpperCamelCase__ :Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ :Tuple = config["""lr"""]
UpperCamelCase__ :List[str] = int(config["""num_epochs"""] )
UpperCamelCase__ :List[str] = int(config["""seed"""] )
UpperCamelCase__ :List[str] = int(config["""batch_size"""] )
UpperCamelCase__ :Any = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowercase__ )
def inner_training_loop(lowercase__ : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ :int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase__ :List[Any] = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase__ :Any = AdamW(params=model.parameters() , lr=lowercase__ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate scheduler
UpperCamelCase__ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=100 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Tuple = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCamelCase__ :Dict = model(**lowercase__ )
UpperCamelCase__ :Optional[int] = outputs.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ :List[Any] = model(**lowercase__ )
UpperCamelCase__ :Optional[int] = outputs.logits.argmax(dim=-1 )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
UpperCamelCase__ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def A ( ) -> List[Any]:
UpperCamelCase__ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
UpperCamelCase__ :Any = parser.parse_args()
UpperCamelCase__ :List[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
    main()
 | 45 |
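# Editor's example — the idea behind @find_executable_batch_size above, as a runnable toy that
# simulates the accelerate utility instead of importing it: on an out-of-memory error the
# wrapper halves the batch size and retries the decorated training loop.
def find_executable_batch_size_toy(starting_batch_size: int):
    def decorator(fn):
        def wrapper():
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size)
                except MemoryError:
                    batch_size //= 2  # shrink and retry, as accelerate does on CUDA OOM
            raise RuntimeError("no executable batch size found")

        return wrapper

    return decorator


@find_executable_batch_size_toy(starting_batch_size=64)
def train(batch_size: int) -> int:
    if batch_size > 16:
        raise MemoryError  # pretend the GPU ran out of memory
    return batch_size


assert train() == 16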
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Backtracking: place each unused element at position `index`, recurse, then undo the choice.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
 | 16 | 0 |
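# Editor's example — a sanity check of the backtracking generator above against
# itertools.permutations; both enumerate the n! orderings in the same index order.
from itertools import permutations


def collect_permutations(sequence: list) -> list:
    results: list = []

    def backtrack(current: list, used: list) -> None:
        if len(current) == len(sequence):
            results.append(tuple(current))
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results


assert collect_permutations([3, 1, 2]) == list(permutations([3, 1, 2]))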
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase_: Optional[Any] = 'hf-internal-testing/tiny-random-bert'
lowercase_: Dict = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
lowercase_: Union[str, Any] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Any ):
snake_case__ : List[Any] = cached_file(__a , __a )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__a ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__a , __a ) ) )
with open(os.path.join(__a , """refs""" , """main""" ) ) as f:
snake_case__ : int = f.read()
self.assertEqual(__a , os.path.join(__a , """snapshots""" , __a , __a ) )
self.assertTrue(os.path.isfile(__a ) )
# File is cached at the same place the second time.
snake_case__ : List[str] = cached_file(__a , __a )
self.assertEqual(__a , __a )
# Using a specific revision to test the full commit hash.
snake_case__ : str = cached_file(__a , __a , revision="""9b8c223""" )
self.assertEqual(__a , os.path.join(__a , """snapshots""" , __a , __a ) )
def lowercase ( self : Any ):
with self.assertRaisesRegex(__a , """is not a valid model identifier""" ):
snake_case__ : Any = cached_file("""tiny-random-bert""" , __a )
with self.assertRaisesRegex(__a , """is not a valid git identifier""" ):
snake_case__ : List[Any] = cached_file(__a , __a , revision="""aaaa""" )
with self.assertRaisesRegex(__a , """does not appear to have a file named""" ):
snake_case__ : Any = cached_file(__a , """conf""" )
def lowercase ( self : Union[str, Any] ):
with self.assertRaisesRegex(__a , """does not appear to have a file named""" ):
snake_case__ : Tuple = cached_file(__a , """conf""" )
with open(os.path.join(__a , """refs""" , """main""" ) ) as f:
snake_case__ : Optional[int] = f.read()
self.assertTrue(os.path.isfile(os.path.join(__a , """.no_exist""" , __a , """conf""" ) ) )
snake_case__ : List[str] = cached_file(__a , """conf""" , _raise_exceptions_for_missing_entries=__a )
self.assertIsNone(__a )
snake_case__ : Optional[Any] = cached_file(__a , """conf""" , local_files_only=__a , _raise_exceptions_for_missing_entries=__a )
self.assertIsNone(__a )
snake_case__ : Dict = mock.Mock()
snake_case__ : int = 5_0_0
snake_case__ : List[Any] = {}
snake_case__ : List[Any] = HTTPError
snake_case__ : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__a ) as mock_head:
snake_case__ : Optional[Any] = cached_file(__a , """conf""" , _raise_exceptions_for_connection_errors=__a )
self.assertIsNone(__a )
# This check we did call the fake head request
mock_head.assert_called()
def lowercase ( self : Optional[int] ):
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __a ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __a ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __a ) )
def lowercase ( self : Union[str, Any] ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__a , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , __a )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__a , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , __a , revision="""ahaha""" )
snake_case__ : List[str] = get_file_from_repo("""bert-base-cased""" , __a )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case__ : List[Any] = json.loads(open(__a , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 7_6_8 )
def lowercase ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : int = Path(__a ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(__a , """a.txt""" ) , str(__a ) )
self.assertIsNone(get_file_from_repo(__a , """b.txt""" ) )
| 715 |
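# Editor's example — a hedged usage sketch for the cache helper exercised above; the first call
# needs network access, later calls hit the local cache. The repo id mirrors the test constant.
from transformers.utils import cached_file

config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(config_path)  # a path inside the cache's models--.../snapshots/<commit>/ tree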
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """simple docstring"""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 127 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
    '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowerCamelCase_ ( _snake_case ):
"""simple docstring"""
a_ ="vit_msn"
def __init__( self : Tuple , _a : str=768 , _a : List[str]=12 , _a : Union[str, Any]=12 , _a : List[Any]=3072 , _a : Union[str, Any]="gelu" , _a : List[str]=0.0 , _a : Union[str, Any]=0.0 , _a : Tuple=0.02 , _a : Any=1e-06 , _a : Union[str, Any]=224 , _a : Tuple=16 , _a : Optional[int]=3 , _a : Dict=True , **_a : int , ) -> Optional[Any]:
super().__init__(**lowerCamelCase_ )
__lowerCamelCase : Union[str, Any] = hidden_size
__lowerCamelCase : Optional[Any] = num_hidden_layers
__lowerCamelCase : Optional[int] = num_attention_heads
__lowerCamelCase : List[Any] = intermediate_size
__lowerCamelCase : Optional[int] = hidden_act
__lowerCamelCase : Union[str, Any] = hidden_dropout_prob
__lowerCamelCase : List[Any] = attention_probs_dropout_prob
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : List[Any] = layer_norm_eps
__lowerCamelCase : Dict = image_size
__lowerCamelCase : int = patch_size
__lowerCamelCase : List[Any] = num_channels
__lowerCamelCase : List[str] = qkv_bias
| 459 |
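# Editor's example — a minimal instantiation sketch for the config above: unset fields fall back
# to the defaults in __init__, and extra kwargs flow through to PretrainedConfig.
from transformers import ViTMSNConfig

config = ViTMSNConfig(image_size=224, patch_size=16, num_hidden_layers=6)
print(config.model_type, config.hidden_size)  # vit_msn 768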
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class snake_case ( _snake_case ):
'''simple docstring'''
UpperCamelCase__ : torch.FloatTensor
class snake_case ( _snake_case , _snake_case ):
'''simple docstring'''
@register_to_config
def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 3 , lowerCamelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCamelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCamelCase_ : Tuple[int] = (64,) , lowerCamelCase_ : int = 1 , lowerCamelCase_ : str = "silu" , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 32 , lowerCamelCase_ : int = 256 , lowerCamelCase_ : int = 32 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : float = 0.18215 , lowerCamelCase_ : str = "group" , ) ->str:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
UpperCAmelCase__ = Encoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , )
UpperCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
UpperCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
UpperCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ )
UpperCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
# pass init params to Decoder
UpperCAmelCase__ = Decoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , )
@apply_forward_hook
def UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : bool = True ) ->VQEncoderOutput:
'''simple docstring'''
UpperCAmelCase__ = self.encoder(lowerCamelCase_ )
UpperCAmelCase__ = self.quant_conv(lowerCamelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCamelCase_ )
@apply_forward_hook
def UpperCAmelCase ( self : int , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if not force_not_quantize:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.quantize(lowerCamelCase_ )
else:
UpperCAmelCase__ = h
UpperCAmelCase__ = self.post_quant_conv(lowerCamelCase_ )
UpperCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
def UpperCAmelCase ( self : Any , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCAmelCase__ = sample
UpperCAmelCase__ = self.encode(lowerCamelCase_ ).latents
UpperCAmelCase__ = self.decode(lowerCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
| 392 | 0 |
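# Editor's example — an encode -> decode round trip through the VQ autoencoder above, using a
# default, randomly initialized model; shapes are illustrative and no checkpoint is loaded.
import torch
from diffusers import VQModel

model = VQModel()  # default config: one down/up block, group norm, codebook of 256 entries
sample = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(sample).latents        # quantization happens inside decode()
    reconstruction = model.decode(latents).sample
print(latents.shape, reconstruction.shape)  # torch.Size([1, 3, 32, 32]) twice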
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : Optional[int] = logging.getLogger(__name__)
def lowerCamelCase( a__ ,a__):
return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(a__) -> Dict:
_SCREAMING_SNAKE_CASE =np.argmax(p.predictions ,axis=1)
return {"acc": simple_accuracy(a__ ,p.label_ids)}
# Data collator
_SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 705 |
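# Editor's example — the HfArgumentParser pattern that drives the script above, as a runnable
# toy: dataclass fields become CLI flags and parse back into typed argument objects. The field
# names are illustrative.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ToyArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on."})
    max_seq_length: int = field(default=128, metadata={"help": "Maximum total input length."})


(toy_args,) = HfArgumentParser(ToyArguments).parse_args_into_dataclasses(["--task_name", "swag"])
print(toy_args.task_name, toy_args.max_seq_length)  # swag 128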
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : Optional[Any] = '''T5Config'''
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "mt5"
UpperCAmelCase = MTaConfig
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "mt5"
UpperCAmelCase = MTaConfig
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "mt5"
    UpperCAmelCase = MTaConfig
 | 191 | 0 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case ( lowercase__):
def A__ ( self : List[Any] ):
lowercase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a_, "embed_dim" ) )
self.parent.assertTrue(hasattr(a_, "num_heads" ) )
class _snake_case :
def __init__( self : int, __lowercase : str, __lowercase : Any=13, __lowercase : List[Any]=64, __lowercase : Union[str, Any]=3, __lowercase : Dict=[16, 48, 96], __lowercase : Union[str, Any]=[1, 3, 6], __lowercase : Optional[int]=[1, 2, 10], __lowercase : Dict=[7, 3, 3], __lowercase : Any=[4, 2, 2], __lowercase : List[str]=[2, 1, 1], __lowercase : Dict=[2, 2, 2], __lowercase : Dict=[False, False, True], __lowercase : str=[0.0, 0.0, 0.0], __lowercase : List[Any]=0.02, __lowercase : Union[str, Any]=1e-1_2, __lowercase : Optional[Any]=True, __lowercase : Tuple=True, __lowercase : List[Any]=2, ):
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_sizes
lowercase__ = patch_stride
lowercase__ = patch_padding
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = num_labels
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = num_heads
lowercase__ = stride_kv
lowercase__ = depth
lowercase__ = cls_token
lowercase__ = attention_drop_rate
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
def A__ ( self : Tuple ):
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
# create a random int32 tensor of given shape
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self : Tuple ):
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[Any], __lowercase : Dict ):
lowercase__ = TFCvtModel(config=a_ )
lowercase__ = model(a_, training=a_ )
lowercase__ = (self.image_size, self.image_size)
lowercase__ , lowercase__ = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
lowercase__ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
lowercase__ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) )
def A__ ( self : Dict, __lowercase : Union[str, Any], __lowercase : Union[str, Any], __lowercase : Any ):
lowercase__ = self.num_labels
lowercase__ = TFCvtForImageClassification(a_ )
lowercase__ = model(a_, labels=a_, training=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def A__ ( self : str ):
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
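# A usage note (an assumption, not part of the original file): in the
# transformers repository such tests are normally collected with pytest,
# e.g. from the repo root:
#
#     RUN_SLOW=1 python -m pytest tests/models/cvt/test_modeling_tf_cvt.py
#
# `RUN_SLOW=1` enables the @slow-decorated checks such as the integration
# test above; the exact file path is an assumption about the repo layout.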
import numpy as np
class Cell:
    """
    A cell in the grid world. It carries its position, the parent cell it was
    reached from, and the A* bookkeeping values g (cost so far), h (heuristic)
    and f (g + h).
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """
    The external world: a world_size[0] x world_size[1] numpy grid.
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """
        Return the in-bounds neighbours of cell (including diagonals).
        """
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal) -> list:
    """
    A* search from start to goal on the given world; returns the path as a
    list of positions, using squared euclidean distance as the heuristic.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
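
# A minimal extra usage sketch (not part of the original demo): the same
# search on a larger world. `Gridworld` and `astar` are defined above; the
# (7, 7) goal is an arbitrary choice for illustration.
if __name__ == "__main__":
    big_world = Gridworld(world_size=(8, 8))
    big_start = Cell()
    big_start.position = (0, 0)
    big_goal = Cell()
    big_goal.position = (7, 7)
    for position in astar(big_world, big_start, big_goal):
        big_world.w[position] = 1
    big_world.show()  # the marked diagonal is the recovered path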
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of the flattened layer
        :param bp_num2: units number of the hidden layer
        :param bp_num3: units number of the output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
def _UpperCamelCase ( self , a_ , a_ , a_="average_pool" ):
# pooling process
lowerCamelCase_ : Any = len(featuremaps[0] )
lowerCamelCase_ : str = int(size_map / size_pooling )
lowerCamelCase_ : Tuple = []
for i_map in range(len(a_ ) ):
lowerCamelCase_ : List[Any] = featuremaps[i_map]
lowerCamelCase_ : Dict = []
for i_focus in range(0 , a_ , a_ ):
for j_focus in range(0 , a_ , a_ ):
lowerCamelCase_ : Union[str, Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(a_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(a_ ) )
lowerCamelCase_ : Tuple = np.asmatrix(a_ ).reshape(a_ , a_ )
featuremap_pooled.append(a_ )
return featuremap_pooled
    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # spread each pooled gradient back over its pooling window, then apply
        # the sigmoid derivative of the feature map
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)

                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
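
# A minimal usage sketch (an assumption, not part of the original file):
# train on two random 12x12 "images" so that every shape lines up:
# a 3x3 kernel with stride 1 gives 10x10 feature maps, 2x2 pooling gives
# 5x5 maps, and two kernels flatten to 2 * 5 * 5 = 50 inputs.
if __name__ == "__main__":
    cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=2 * 5 * 5, bp_num2=20, bp_num3=2)
    train_images = [np.random.rand(12, 12) for _ in range(2)]
    train_labels = [[1, 0], [0, 1]]
    cnn.train(
        patterns=2,
        datas_train=train_images,
        datas_teach=train_labels,
        n_repeat=5,
        error_accuracy=0.1,
        draw_e=False,
    )
    print(cnn.predict(train_images))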
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                    cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
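
# A minimal usage sketch (an assumption, not part of the original file): load
# the pretrained vocabulary and round-trip a sentence. Requires network access
# (or a cached checkpoint) and the `sentencepiece` package.
if __name__ == "__main__":
    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    ids = tokenizer.encode("Hello, world!")
    print(ids)
    print(tokenizer.decode(ids))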
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
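
# A minimal smoke-test sketch (an assumption, not part of the original file):
# run a randomly initialised FlaubertModel forward pass without downloading
# any checkpoint. Config values mirror the small sizes used by the tester.
if __name__ == "__main__":
    config = FlaubertConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4)
    model = FlaubertModel(config)
    model.eval()
    dummy_ids = torch.randint(0, 99, (1, 7))
    with torch.no_grad():
        print(model(dummy_ids)[0].shape)  # -> torch.Size([1, 7, 32])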
"""simple docstring"""
import math
def UpperCamelCase ( _lowerCAmelCase : int ) -> str:
_UpperCAmelCase : Any = 0
_UpperCAmelCase : Dict = 0
while num > 0:
_UpperCAmelCase : str = num % 8
_UpperCAmelCase : Any = octal + (remainder * math.floor(math.pow(10, _lowerCAmelCase ) ))
counter += 1
_UpperCAmelCase : List[Any] = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f'''0o{int(_lowerCAmelCase )}'''
def UpperCamelCase ( ) -> None:
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
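
# A quick cross-check sketch (an assumption, not part of the original file):
# Python's built-in oct() should agree with decimal_to_octal() for the sample
# values used above.
if __name__ == "__main__":
    for value in (2, 8, 65, 216, 512):
        assert decimal_to_octal(value) == oct(value), value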
"""PyTorch MobileNetV1 model."""

from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
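
# A minimal inference sketch (an assumption, not part of the original file):
# this module normally lives inside `transformers`, so the relative imports
# above only resolve in that package. With `transformers` installed, the same
# model is reachable through the public API:
#
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#     import torch
#
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL image
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])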
"""PyTorch learning-rate schedules with warmup."""

import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """
    Piecewise constant schedule driven by a comma-separated `step:multiplier`
    rule string whose last entry is the final multiplier.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
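# Usage sketch (an illustrative addition, not from the original module; torch
# and the tiny model are assumptions made only for this demo): builds a
# piecewise-constant scheduler from the "step:multiplier,...,final" rule
# string parsed above -- multiplier 1.0 before step 1000, 0.1 before 2000,
# 0.01 afterwards.
def _demo_piecewise_constant_schedule():
    import torch

    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_piecewise_constant_schedule(optimizer, step_rules="1000:1.0,2000:0.1,0.01")
    for _ in range(10):
        optimizer.step()
        scheduler.step()
    return scheduler.get_last_lr()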
| 640 | 1 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    """simple docstring"""

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
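# Brute-force cross-check (an illustrative addition, not part of the original
# snippet): enumerate every way to hand each person one task from their list
# and keep only assignments with pairwise-distinct tasks; for the small
# instance above this should agree with the bitmask DP (both give 10).
def _brute_force_count(task_performed):
    from itertools import product

    return sum(
        1 for choice in product(*task_performed) if len(set(choice)) == len(choice)
    )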
| 602 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=4_4100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    '''simple docstring'''

    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'spectrogram_length'))
        self.assertTrue(hasattr(feature_extractor, 'feature_size'))
        self.assertTrue(hasattr(feature_extractor, 'num_audio_channels'))
        self.assertTrue(hasattr(feature_extractor, 'hop_length'))
        self.assertTrue(hasattr(feature_extractor, 'chunk_length'))
        self.assertTrue(hasattr(feature_extractor, 'sampling_rate'))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters')
        mel_second = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters')
        mel_second = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors='np', sampling_rate=4_4100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=4_4100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors='np', sampling_rate=4_4100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=4_4100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors='pt').audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
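# Shape-convention note (an illustrative addition, not from the test file):
# the assertions above expect `audio_values` laid out as
# (batch, num_audio_channels, time_frames, feature_size), with time_frames
# capped at `spectrogram_length`; the integration test's (1, 1, 192, 128)
# follows exactly that layout for one mono clip and 128 mel bins.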
| 27 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
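# How the lazy pattern above behaves (a minimal sketch, independent of the
# transformers implementation): attribute access on the module triggers the
# real import, so `import ...trocr` stays cheap until a symbol is touched.
#
#   import importlib, types
#
#   class _TinyLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._map = {c: m for m, cs in import_structure.items() for c in cs}
#
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._map[attr], self.__name__)
#           return getattr(module, attr)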
| 711 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    """simple docstring"""

    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """simple docstring"""
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        """simple docstring"""
        pass


class AbstractDatasetInputStream(ABC):
    """simple docstring"""

    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """simple docstring"""
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        """simple docstring"""
        pass
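# Minimal sketch of a concrete reader (an illustrative addition; real readers
# such as the csv/json ones wire a DatasetBuilder in here, and the reuse of
# `path_or_paths` as raw rows is purely a toy assumption):
class _InMemoryListReader(AbstractDatasetReader):
    def read(self):
        return Dataset.from_list(list(self.path_or_paths))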
| 455 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7_600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        '''simple docstring'''

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        '''simple docstring'''
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""

    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        '''simple docstring'''
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        '''simple docstring'''
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
'''simple docstring'''
A_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A_ : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
A_ : Optional[Any] = [None, 1_600, None]
for max_length, padding in zip(_snake_case , _snake_case ):
A_ : str = feat_extract(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors="np" )
A_ : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
    def test_zero_mean_unit_variance_normalization(self):
'''simple docstring'''
A_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : Dict = range(800 , 1_400 , 200 )
A_ : str = [floats_list((1, x) )[0] for x in lengths]
A_ : List[str] = ['''longest''', '''max_length''', '''do_not_pad''']
A_ : List[str] = [None, 1_600, None]
for max_length, padding in zip(_snake_case , _snake_case ):
A_ : List[str] = feat_extract(_snake_case , max_length=_snake_case , padding=_snake_case )
A_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
'''simple docstring'''
A_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A_ : Tuple = feat_extract(
_snake_case , truncation=_snake_case , max_length=1_000 , padding="max_length" , return_tensors="np" )
A_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
'''simple docstring'''
A_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A_ : str = feat_extract(
_snake_case , truncation=_snake_case , max_length=1_000 , padding="longest" , return_tensors="np" )
A_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
A_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A_ : Optional[Any] = feat_extract(
_snake_case , truncation=_snake_case , max_length=2_000 , padding="longest" , return_tensors="np" )
A_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
    def test_double_precision_pad(self):
'''simple docstring'''
A_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : int = np.random.rand(100 ).astype(np.floataa )
A_ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A_ : Union[str, Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A_ : int = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def test_call_target(self):
'''simple docstring'''
A_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A_ : Optional[int] = [np.asarray(_snake_case ) for speech_input in speech_inputs]
# Test feature size
A_ : Dict = feature_extractor(audio_target=_snake_case , padding=_snake_case , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
A_ : Any = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
A_ : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1e-3 ) )
# Test batched
A_ : Tuple = feature_extractor(_snake_case , return_tensors="np" ).input_values
A_ : Dict = feature_extractor(_snake_case , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ):
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A_ : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A_ : Union[str, Any] = np.asarray(_snake_case )
A_ : Dict = feature_extractor(_snake_case , return_tensors="np" ).input_values
A_ : Union[str, Any] = feature_extractor(_snake_case , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ):
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1e-3 ) )
    def test_batch_feature_target(self):
'''simple docstring'''
A_ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
A_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
A_ : int = feat_extract.model_input_names[0]
A_ : Dict = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case , processed_features[input_name] ) ) )
A_ : List[str] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_snake_case )
A_ : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
A_ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
A_ : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def test_batch_feature_target_pt(self):
'''simple docstring'''
A_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_snake_case )
A_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A_ : Tuple = feat_extract.model_input_names[0]
A_ : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
A_ : Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
A_ : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def test_padding_accepts_tensors_target_pt(self):
'''simple docstring'''
A_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
A_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
A_ : List[str] = feat_extract.model_input_names[0]
A_ : int = BatchFeature({input_name: speech_inputs} )
A_ : List[Any] = feat_extract.num_mel_bins # hack!
A_ : Optional[int] = feat_extract.pad(_snake_case , padding="longest" , return_tensors="np" )[input_name]
A_ : str = feat_extract.pad(_snake_case , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
    def test_attention_mask_target(self):
'''simple docstring'''
A_ : Dict = self.feat_extract_dict
A_ : Any = True
A_ : Union[str, Any] = self.feature_extraction_class(**_snake_case )
A_ : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
A_ : List[Any] = [len(_snake_case ) for x in speech_inputs]
A_ : Any = feat_extract.model_input_names[0]
A_ : str = BatchFeature({input_name: speech_inputs} )
A_ : str = feat_extract.num_mel_bins # hack!
A_ : Optional[Any] = feat_extract.pad(_snake_case , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , _snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _snake_case )
    def test_attention_mask_with_truncation_target(self):
'''simple docstring'''
A_ : List[Any] = self.feat_extract_dict
A_ : Tuple = True
A_ : Optional[int] = self.feature_extraction_class(**_snake_case )
A_ : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
A_ : int = [len(_snake_case ) for x in speech_inputs]
A_ : Union[str, Any] = feat_extract.model_input_names[0]
A_ : Any = BatchFeature({input_name: speech_inputs} )
A_ : int = min(_snake_case )
A_ : List[str] = feat_extract.num_mel_bins # hack!
A_ : Optional[Any] = feat_extract.pad(
_snake_case , padding="max_length" , max_length=_snake_case , truncation=_snake_case , return_tensors="np" )
self.assertIn("attention_mask" , _snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        '''simple docstring'''
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93_680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
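# Normalization sanity-check sketch (an addition for illustration): the
# `_check_zero_mean_unit_variance` helper above reduces to this numpy
# property, which the extractor's `do_normalize` path is expected to enforce
# over the unpadded portion of each example.
#
#   x = np.random.rand(1000)
#   normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
#   assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3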
| 454 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
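# Usage sketch (an illustrative addition): 1 marks walkable cells, 0 walls;
# the weighted distance from (0, 0) to (2, 2) below is 4.0 without diagonals.
#
#   grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # dist == 4.0, path == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]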
| 560 | 0 |
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url, json={"""text""": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            """Request to slack returned an error """
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 507 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {"""vocab_file""": """sentencepiece.model"""}
_A = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
}
_A = {
"""google/rembert""": 2_5_6,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model)

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """simple docstring"""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model."""
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 507 | 1 |
"""simple docstring"""
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    '''simple docstring'''
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
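# Cross-check sketch (an illustrative addition): the truncated Maclaurin
# series should track math.sin closely at the default accuracy.
def _check_against_math_sin():
    import math as _math

    for deg in (0, 30, 90, 180, 270, 765):
        assert abs(maclaurin_sin(deg) - _math.sin(_math.radians(deg))) < 1e-6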
| 633 |
"""simple docstring"""
class Matrix:  # Public class to implement a graph
    def __init__(self, row, col, graph) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
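# Usage sketch (an illustrative addition): with 8-connectivity the classic
# grid below contains 5 islands.
if __name__ == "__main__":
    example = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Matrix(5, 5, example).count_islands())  # expected: 5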
| 633 | 1 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """simple docstring"""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
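# Usage sketch (an illustrative addition): the classic instance below fits the
# items of density 6 and 5 whole, then 20/30 of the last item:
# 60 + 100 + 80 = 240.
if __name__ == "__main__":
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0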
| 706 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F"git diff --name-only {fork_point_sha}".split()).decode('''utf-8''').split()

joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 115 | 0 |
import os
def snake_case__ ( lowercase = "input.txt" ):
with open(os.path.join(os.path.dirname(lowercase ) , lowercase ) ) as input_file:
lowerCAmelCase_: Dict = [
[int(lowercase ) for element in line.split("," )]
for line in input_file.readlines()
]
lowerCAmelCase_: int = len(lowercase )
lowerCAmelCase_: Any = len(matrix[0] )
lowerCAmelCase_: List[Any] = [[-1 for _ in range(lowercase )] for _ in range(lowercase )]
for i in range(lowercase ):
lowerCAmelCase_: Union[str, Any] = matrix[i][0]
for j in range(1 , lowercase ):
for i in range(lowercase ):
lowerCAmelCase_: str = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowercase ):
lowerCAmelCase_: Optional[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowerCAmelCase_: Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
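# Explanatory note (an addition): for each column the DP makes three passes --
# charge the step from the left, then relax top-down, then bottom-up -- so a
# path may snake vertically inside a column before moving right. For example,
# on [[1, 9, 1], [1, 1, 1], [9, 9, 1]] the cheapest left-to-right path runs
# along the middle row for a total of 3.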
if __name__ == "__main__":
    print(f'''{solution() = }''')
 | 613 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 | 613 | 1 |
import os
from pathlib import Path
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''', '''ms_deform_attn_cpu.cpp'''),
            os.path.join('''cuda''', '''ms_deform_attn_cuda.cu'''),
        ]
    ]

    load(
        '''MultiScaleDeformableAttention''',
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=['''-DWITH_CUDA=1'''],
        extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
return MSDA
| 143 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = "\n    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n    begging for his blessing. <eod> </s> <eos>\n    "
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs['''input_ids'''].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    ''' [None, \'hole\']''')
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''')
            if return_tensors is not None:
                raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''')
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''')
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''')
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'''add_space_before_punct_symbol''': True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def __magic_name__ ( self : Any , lowercase__ : List[Any] , lowercase__ : int="" , lowercase__ : Union[str, Any]=None , **lowercase__ : int ):
a_ = self.tokenizer(
prefix + prompt_text , padding=lowercase__ , add_special_tokens=lowercase__ , return_tensors=self.framework )
a_ = prompt_text
if handle_long_generation == "hole":
a_ = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
a_ = generate_kwargs['''max_new_tokens''']
else:
a_ = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
a_ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
a_ = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
a_ = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __magic_name__ ( self : Union[str, Any] , lowercase__ : Dict , **lowercase__ : Tuple ):
a_ = model_inputs['''input_ids''']
a_ = model_inputs.get('''attention_mask''' , lowercase__ )
# Allow empty prompts
if input_ids.shape[1] == 0:
a_ = None
a_ = None
a_ = 1
else:
a_ = input_ids.shape[0]
a_ = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
a_ = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
a_ = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
a_ = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
a_ = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
a_ = self.model.generate(input_ids=lowercase__ , attention_mask=lowercase__ , **lowercase__ )
a_ = generated_sequence.shape[0]
if self.framework == "pt":
a_ = generated_sequence.reshape(lowercase__ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
a_ = tf.reshape(lowercase__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __magic_name__ ( self : List[str] , lowercase__ : Optional[Any] , lowercase__ : Dict=ReturnType.FULL_TEXT , lowercase__ : Tuple=True ):
a_ = model_outputs['''generated_sequence'''][0]
a_ = model_outputs['''input_ids''']
a_ = model_outputs['''prompt_text''']
a_ = generated_sequence.numpy().tolist()
a_ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
a_ = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
a_ = self.tokenizer.decode(
lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
a_ = 0
else:
a_ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ , ) )
if return_type == ReturnType.FULL_TEXT:
a_ = prompt_text + text[prompt_length:]
else:
a_ = text[prompt_length:]
a_ = {'''generated_text''': all_text}
records.append(lowercase__ )
return records
| 143 | 1 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """simple docstring"""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
__lowerCamelCase : Any = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
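# Illustrative addition: a triangle (odd cycle) admits no 2-coloring, so the
# same check reports False for it.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False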
| 404 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


# Timed wrappers around Dataset.map / Dataset.filter (they intentionally
# shadow the built-in names, following the original benchmark script).
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
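# For context: `get_duration` comes from the local benchmark utils module. A
# minimal sketch of such a timing decorator (an assumption - the real helper
# may differ) looks like:
#
#   import functools, timeit
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           starttime = timeit.default_timer()
#           func(*args, **kwargs)
#           return timeit.default_timer() - starttime
#       return wrapper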
| 654 | 0 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal integer to its octal representation, e.g. 65 -> '0o101'."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print decimal-to-octal conversions for a few sample values."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")
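# Python's built-in oct() produces the same "0o"-prefixed strings for
# non-negative integers, which makes a convenient cross-check:
for value in (2, 8, 65, 216, 512):
    assert decimal_to_octal(value) == oct(value)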
if __name__ == "__main__":
main()
| 720 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
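# With the lazy module installed in sys.modules, importing the package stays
# cheap and the torch-backed symbols above only materialize on first access,
# e.g. (illustrative):
#
#   from transformers import SwiftFormerConfig, SwiftFormerModel
#   model = SwiftFormerModel(SwiftFormerConfig())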
| 53 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"""prime""": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"""generator""": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over the RFC 3526 MODP groups defined above."""

    # Current minimum recommendation is 2048 bit (group 14).
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
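# Typical round trip (illustrative): both parties derive the same shared secret.
#
#   alice = DiffieHellman(group=14)
#   bob = DiffieHellman(group=14)
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b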
| 441 |
def decimal_to_fraction(decimal) -> tuple[int, int]:
    """Return the given decimal (number or numeric string) as a reduced
    (numerator, denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # raises ValueError: Please enter a valid number
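# Cross-check against the standard library (illustrative, not part of the
# original module):
#
#   from fractions import Fraction
#   assert Fraction(*decimal_to_fraction(6.25)) == Fraction("6.25")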
| 689 | 0 |
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all files into one, always
    merging the two smallest files first (greedy optimal merge pattern)."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
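# A quick hand-check: optimal_merge_pattern([2, 3, 4]) merges 2+3 at cost 5,
# then 5+4 at cost 9, for a total cost of 14.
# Because files.index(min(files)) rescans the list, the loop above is O(n^2).
# A heap keeps the same greedy choice in O(n log n); this variant is a sketch
# added for comparison, not part of the original module:
import heapq


def optimal_merge_pattern_heap(files: list) -> float:
    heapq.heapify(files)
    optimal_merge_cost = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        optimal_merge_cost += merged
        heapq.heappush(files, merged)
    return optimal_merge_cost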
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 717 |
from ..utils import DummyObject, requires_backends
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 234 | 0 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's C++ logging noise
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 628 |
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string, e.g. 5 -> '0b101', -5 -> '-0b101'."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
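# Cross-check against the built-in formatting (illustrative):
#
#   assert decimal_to_binary(5) == bin(5) == "0b101"
#   assert decimal_to_binary(-5) == bin(-5) == "-0b101"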
| 628 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model to quickly catch simple problems; extensive multi-model
    # testing happens in the @slow tests below
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into the list here - they can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 702 |
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """Return True if some subset of arr sums exactly to required_sum
    (classic bottom-up dynamic programming)."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
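# Small worked example (illustrative, not part of the original module):
# a subset of [3, 34, 4, 12, 5, 2] sums to 9 (4 + 5), but none sums to 30.
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False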
if __name__ == "__main__":
import doctest
doctest.testmod()
| 563 | 0 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 518 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 518 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config
    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
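# From the shell this is exposed as `accelerate env` (optionally with
# `--config_file path/to/config.yaml`), which prints the table assembled
# above for inclusion in bug reports.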
| 711 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ) -> None:
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "MaskFormerConfig":
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
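# Example (illustrative): `attribute_map` above aliases `hidden_size` to
# `mask_feature_size`, so the two names resolve to the same value:
#
#   config = MaskFormerConfig(mask_feature_size=256)
#   assert config.hidden_size == config.mask_feature_size == 256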
| 166 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 344 |
def solution(min_total: int = 10**12) -> int:
    """Project Euler Problem 100: walk the convergents of the underlying Pell
    equation until the total number of discs exceeds min_total, then return
    the corresponding number of blue discs."""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
| 161 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 478 | 0 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the string n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
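# Hedged sanity check (not part of the original solution): the same
# reduce-over-a-window idea on a short digit string with a window of two.
# The windows of "2997" are "29", "99" and "97", with digit products 18, 81
# and 63, so the maximum is 81.
assert (
    max(
        int(reduce(lambda x, y: str(int(x) * int(y)), "2997"[i : i + 2]))
        for i in range(len("2997") - 1)
    )
    == 81
)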
if __name__ == "__main__":
print(F'{solution() = }')
| 264 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'artists_file': 'artists.json',
    'lyrics_file': 'lyrics.json',
    'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'artists_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
    },
    'genres_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
    },
    'lyrics_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
    },
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
    'jukebox': 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    """Jukebox tokenizer: artists, genres and lyrics each use their own vocabulary."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2 the lyric vocabulary had 80 characters; v3 dropped "+", leaving 79.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        return list(lyrics)
    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(
        self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False
    ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """Strips accents (combining marks) from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text: str) -> str:
        """Lowercase, keep [a-z0-9.], map other characters to "_" and collapse runs."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text
    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
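# Hedged standalone sketch (not part of the tokenizer above; the helper name is
# made up for illustration): the artist/genre normalisation keeps [a-z0-9.],
# maps every other character of the lowercased text to "_", collapses runs of
# "_" and strips them from the edges.
def _normalize_sketch(text: str) -> str:
    accepted = frozenset("abcdefghijklmnopqrstuvwxyz0123456789.")
    mapped = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", mapped).strip("_")
assert _normalize_sketch("The Beatles!") == "the_beatles"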
| 264 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
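# Hedged aside (not part of the original file): _LazyModule builds on the same
# deferred-import idea as PEP 562's module-level __getattr__. With the
# _LazyModule installed above, the sketch below is never consulted; it is here
# purely to illustrate the mechanism.
def __getattr__(name):
    if name == "PoolFormerModel":
        from .modeling_poolformer import PoolFormerModel  # imported only on first access
        return PoolFormerModel
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")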
| 700 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 169 | 0 |