import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # The super-resolution pipeline upscales a low-res image (16x16) given
        # the full-res original image and its inpainting mask (32x32).
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Requires higher tolerance because the pipeline runs in half precision.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    """Close or re-open stale issues on huggingface/diffusers based on recent activity."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()

def solution(n: int = 2000000) -> int:
    """Sum all primes strictly below n using a sieve of Eratosthenes."""
    # 0 marks "still possibly prime", 1 marks "composite"; 0 and 1 are pre-marked.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
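
# Quick sanity check (example values are mine, not from the original file):
# the primes below 10 are 2, 3, 5 and 7, which sum to 17.
assert solution(10) == 17
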
if __name__ == "__main__":
    print(f"{solution() = }")

from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """
    Brute-force every Caesar shift of ``ciphertext`` and score each candidate
    plaintext with a chi-squared test against English letter frequencies.
    Returns (most likely shift, its chi-squared value, decoded message).
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how often they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected number of occurrences based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected number of occurrences based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
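
# Minimal usage sketch (the example string is mine, not from the original
# file): "khoor zruog" is "hello world" Caesar-shifted by 3, so the search
# should land on shift 3 with the lowest chi-squared score.
# shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
# print(shift, decoded)  # expected: 3 hello world
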
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        # The 5b checkpoint produces different ids (note the -1 entries below).
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

def is_automorphic_number(number: int) -> bool:
    """Return True if number's square ends in the same digits as the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
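
# A few hand-checked examples (mine, not from the original file):
# 5**2 = 25 and 76**2 = 5776 both end in the original number; 7**2 = 49 does not.
assert is_automorphic_number(5) is True
assert is_automorphic_number(76) is True
assert is_automorphic_number(7) is False
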
if __name__ == "__main__":
    import doctest

    doctest.testmod()

from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """Construct a CANINE tokenizer (i.e. a character splitter)."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. split it into characters)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) to an id (i.e. its codepoint)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Converts a Unicode codepoint (integer) to a token (str); special codepoints map to their names."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
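
# Minimal usage sketch (my own example, not part of the original file): CANINE
# tokenizes at the character level, so token ids are plain Unicode codepoints.
# tokenizer = CanineTokenizer()
# tokenizer._convert_token_to_id("h")     # 104, i.e. ord("h")
# tokenizer._convert_id_to_token(0xE000)  # "[CLS]"
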
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter names to their transformers counterparts.
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    """Read one label per line into a {line_number: label} dict."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
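
# Example invocation of the conversion script (the script name and all paths
# below are hypothetical placeholders, not from the original file):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned
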
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ):
        """Store the claim vector, current allocation table and maximum claim table."""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum allocated resources per column (one total per resource)."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Available resources = claim vector minus currently allocated resources."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process need = maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need list back to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm over the stored tables."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align and display the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
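
# Usage sketch with the test tables defined above (the `describe` kwarg name
# is an assumption; any truthy keyword argument triggers the pretty printer):
# BankersAlgorithm(
#     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
# ).main(describe=True)
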
if __name__ == "__main__":
    import doctest

    doctest.testmod()

INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowerCamelCase__ : List[str] = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )

def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """
    Find the numerator of the fraction immediately to the left of
    numerator/denominator among fractions with denominators up to limit.
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        # Largest numerator with current_numerator/current_denominator < numerator/denominator.
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # Compare fractions via cross-multiplication to avoid floating point.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
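
# Hand-checked example (mine, not from the original file): among fractions
# with denominator <= 8, the one immediately left of 3/7 is 2/5, so the
# returned numerator is 2.
assert solution(3, 7, 8) == 2
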
if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))

import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """Harris corner detector; k is the empirical sensitivity constant (0.04 or 0.06)."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # Note: the response below uses this local constant rather than self.k.
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)

from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    """Image processor that resizes, center-crops, rescales and normalizes images."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
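
# Minimal usage sketch (my own example, not from the original file): run one
# random HWC uint8 image through the default pipeline; it is resized to
# 256x256, center-cropped to 224x224, then rescaled and normalized.
# processor = ImageProcessor()
# batch = processor.preprocess(np.zeros((300, 300, 3), dtype=np.uint8))
# batch["pixel_values"][0].shape  # (3, 224, 224)
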
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowercase :
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Any = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : int = use_input_mask
lowerCamelCase__ : List[str] = use_token_type_ids
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : List[Any] = embedding_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_hidden_groups
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : Optional[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : str = num_labels
lowerCamelCase__ : List[Any] = num_choices
lowerCamelCase__ : Any = scope
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[int] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : int = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Union[str, Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : str = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_labels
lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_choices
lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : int = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ):
'''simple docstring'''
lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase )
lowerCamelCase__ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = AlbertModelTester(self )
lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ : Dict = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" )
lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
lowerCamelCase__ : Dict = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
| 5 | 1 |
import math
import random


def sigmoid_function(value: float, deriv: bool = False):
    """Return sigmoid(value); if deriv, treat value as a sigmoid output and return its derivative."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int):
    """Nudge a single weight until the output approaches `expected` (a percentage)."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
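# Example (numbers assumed, not a guarantee): forward_propagation(32, 450_000)
# typically lands close to 32, though the random initial weight makes each
# run differ slightly.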
| 5 |
import os


def solution(filename: str = "input.txt"):
    """Return the minimal left-to-right path sum of the matrix in `filename`,
    moving up, down, or right (Project Euler problem 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f'{solution() = }')
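# Sanity check on an assumed 3x3 grid (not the project's input file): for
# [[1, 2, 3], [4, 5, 6], [7, 8, 9]] the cheapest left-to-right path is
# 1 -> 2 -> 3, so the algorithm yields 6.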
| 5 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Dict = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "ibert"
def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : List[str] = quant_mode
lowerCamelCase__ : int = force_dequant
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 5 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _lowercase ( datasets.Metric):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ):
'''simple docstring'''
        score = compute_bleu(
            reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 5 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
A : Optional[int] = datasets.utils.logging.get_logger(__name__)
class _lowercase ( folder_based_builder.FolderBasedBuilderConfig):
"""simple docstring"""
A__ = None
A__ = None
class _lowercase ( folder_based_builder.FolderBasedBuilder):
"""simple docstring"""
A__ = datasets.Audio()
A__ = "audio"
A__ = AudioFolderConfig
A__ = 42 # definition at the bottom of the script
A__ = AudioClassification(audio_column="audio" , label_column="label")
A : Any = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
A : Dict = AUDIO_EXTENSIONS
| 5 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A : Optional[int] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
A : int = BeautifulSoup(res.text, "html.parser")
A : Any = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
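# Caveat (observed behaviour, not an API contract): Google's result markup,
# including the ".eZt8xd" class targeted above, changes frequently, so the
# selector may need updating whenever the page layout shifts.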
| 5 | 1 |
from typing import Dict
from .base import GenericTensor, Pipeline
class _lowercase ( lowercase__):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Tuple=None , **__lowerCamelCase : int ):
'''simple docstring'''
if tokenize_kwargs is None:
lowerCamelCase__ : Optional[int] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" )
lowerCamelCase__ : List[str] = truncation
lowerCamelCase__ : Optional[Any] = tokenize_kwargs
lowerCamelCase__ : Optional[Any] = {}
if return_tensors is not None:
lowerCamelCase__ : str = return_tensors
return preprocess_params, {}, postprocess_params
def lowerCAmelCase ( self : int , __lowerCamelCase : Optional[Any] , **__lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.framework
lowerCamelCase__ : List[Any] = self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
return model_inputs
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model(**__lowerCamelCase )
return model_outputs
def lowerCAmelCase ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=False ):
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Optional[int] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : List[Any] ):
'''simple docstring'''
return super().__call__(*__lowerCamelCase , **__lowerCamelCase )
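    # Hypothetical usage (model name assumed): this class backs the
    # "feature-extraction" pipeline task, e.g.
    #   from transformers import pipeline
    #   extractor = pipeline("feature-extraction", model="bert-base-uncased")
    #   features = extractor("Hello world")  # nested lists: [batch][token][hidden]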
| 5 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Dict = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
lowerCamelCase__ : str = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !
lowerCamelCase__ : Any = model(__lowerCamelCase )["last_hidden_state"]
lowerCamelCase__ : List[str] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice.
lowerCamelCase__ : str = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 5 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
A : List[str] = logging.get_logger(__name__)
A : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
A : Union[str, Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
A : Optional[int] = {
"allenai/led-base-16384": 16384,
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = LEDTokenizer
A__ = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="replace" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : int=True , **__lowerCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
lowerCamelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
lowerCamelCase__ : List[str] = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) )
lowerCamelCase__ : Tuple = add_prefix_space
lowerCamelCase__ : str = pre_tok_class(**__lowerCamelCase )
lowerCamelCase__ : List[str] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCamelCase__ : Dict = "post_processor"
lowerCamelCase__ : List[Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
if tokenizer_component_instance:
lowerCamelCase__ : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCamelCase__ : List[str] = tuple(state["sep"] )
if "cls" in state:
lowerCamelCase__ : List[str] = tuple(state["cls"] )
lowerCamelCase__ : str = False
if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
lowerCamelCase__ : Any = add_prefix_space
lowerCamelCase__ : Dict = True
if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets:
lowerCamelCase__ : int = trim_offsets
lowerCamelCase__ : Any = True
if changes_to_apply:
lowerCamelCase__ : Union[str, Any] = getattr(__lowerCamelCase , state.pop("type" ) )
lowerCamelCase__ : Union[str, Any] = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value
lowerCamelCase__ : Optional[Any] = value
def lowerCAmelCase ( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase ( self : Optional[Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Any ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowerCamelCase__ : Dict = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def lowerCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=None ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [self.sep_token_id]
lowerCamelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ : Optional[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase__ : int = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
lowerCamelCase__ : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ : Optional[Any] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ : Dict = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
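    # Illustration (shapes assumed): right-padding a global attention mask
    # [0, 1, 1] out to length 5 yields [0, 1, 1, -1, -1]; the -1 entries mark
    # slots introduced purely by padding, per the local-attention convention
    # noted above.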
| 5 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Any = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "blenderbot-small"
A__ = ["past_key_values"]
A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ):
'''simple docstring'''
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Optional[int] = encoder_ffn_dim
lowerCamelCase__ : Dict = encoder_layers
lowerCamelCase__ : Any = encoder_attention_heads
lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCamelCase__ : str = decoder_layers
lowerCamelCase__ : Optional[Any] = decoder_attention_heads
lowerCamelCase__ : List[str] = dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : Dict = activation_dropout
lowerCamelCase__ : Optional[Any] = activation_function
lowerCamelCase__ : Dict = init_std
lowerCamelCase__ : List[str] = encoder_layerdrop
lowerCamelCase__ : Dict = decoder_layerdrop
lowerCamelCase__ : int = use_cache
lowerCamelCase__ : List[Any] = encoder_layers
lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase__ : Union[str, Any] = {0: "batch"}
lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"}
lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Tuple = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
else:
lowerCamelCase__ : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Union[str, Any] = super().outputs
else:
lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Generate decoder inputs
lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1
lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape
lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1]
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads
lowerCamelCase__ : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Optional[int] = decoder_seq_length + 3
lowerCamelCase__ : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : List[Any] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 )
lowerCamelCase__ : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers
lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers
lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
) )
# TODO: test this.
lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__lowerCamelCase , __lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) )
return common_inputs
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCamelCase__ : str = seqlen + 2
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers
lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads
lowerCamelCase__ : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype
lowerCamelCase__ : List[str] = torch.cat(
[common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
lowerCamelCase__ : Tuple = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase )
]
return common_inputs
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase )
lowerCamelCase__ : Dict = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return common_inputs
def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
elif self.task == "causal-lm":
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
else:
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
return common_inputs
def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
| 5 | 1 |
from manim import *
class _lowercase ( lowercase__):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : str = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase__ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowerCamelCase__ : Any = [mem.copy() for i in range(6 )]
lowerCamelCase__ : List[Any] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : List[Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
lowerCamelCase__ : Optional[int] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
lowerCamelCase__ : Tuple = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
lowerCamelCase__ : Optional[int] = Text("CPU" , font_size=24 )
lowerCamelCase__ : List[Any] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
lowerCamelCase__ : List[Any] = [mem.copy() for i in range(1 )]
lowerCamelCase__ : Any = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
lowerCamelCase__ : int = Text("GPU" , font_size=24 )
lowerCamelCase__ : Union[str, Any] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
gpu.align_to(__lowerCamelCase , __lowerCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(__lowerCamelCase )
lowerCamelCase__ : List[Any] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : List[Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
lowerCamelCase__ : int = Text("Model" , font_size=24 )
lowerCamelCase__ : Optional[Any] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) , )
lowerCamelCase__ : Union[str, Any] = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , )
lowerCamelCase__ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ : Tuple = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase , run_time=2.5 ) , Write(__lowerCamelCase ) , Write(__lowerCamelCase ) )
self.add(__lowerCamelCase )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[Any] = []
for i, rect in enumerate(__lowerCamelCase ):
lowerCamelCase__ : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 )
cpu_target.move_to(__lowerCamelCase )
cpu_target.generate_target()
lowerCamelCase__ : Any = 0.4_6 / 4
lowerCamelCase__ : str = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__lowerCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__lowerCamelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__lowerCamelCase , buff=0.0 )
cpu_targs.append(__lowerCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__lowerCamelCase ) )
second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(*__lowerCamelCase )
self.wait()
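# Rendering note (standard manim CLI, command assumed): a scene like this is
# typically previewed with
#   manim -pql <this_file>.py <SceneClassName>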
| 5 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[int] = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "xmod"
def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = position_embedding_type
lowerCamelCase__ : str = use_cache
lowerCamelCase__ : Union[str, Any] = classifier_dropout
lowerCamelCase__ : Any = pre_norm
lowerCamelCase__ : Tuple = adapter_reduction_factor
lowerCamelCase__ : Tuple = adapter_layer_norm
lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm
lowerCamelCase__ : Dict = ln_before_adapter
lowerCamelCase__ : List[Any] = list(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = default_language
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 5 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowercase ( lowercase__):
"""simple docstring"""
@staticmethod
@abstractmethod
def lowerCAmelCase ( __lowerCamelCase : ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
raise NotImplementedError()
| 5 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : Union[str, Any] = is_training
lowerCamelCase__ : Any = use_token_type_ids
lowerCamelCase__ : Union[str, Any] = use_labels
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Optional[int] = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : List[Any] = num_choices
lowerCamelCase__ : Optional[Any] = scope
lowerCamelCase__ : List[Any] = self.vocab_size - 1
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Any = None
lowerCamelCase__ : str = None
lowerCamelCase__ : str = None
if self.use_labels:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase )
lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.num_labels
lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
A__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
A__ = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ):
'''simple docstring'''
lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , )
lowerCamelCase__ : Tuple = inputs_dict["labels"]
lowerCamelCase__ : Any = inputs_dict["labels"]
lowerCamelCase__ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(__lowerCamelCase )
lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is
lowerCamelCase__ : Union[str, Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase )
self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
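# A minimal sketch of what generate(do_sample=False) in the integration test
# above amounts to: greedy decoding. Illustrative only; the real
# implementation also handles KV caching, EOS tokens, and max_length.
def _greedy_decode_sketch(model, input_ids, steps):
    for _ in range(steps):
        logits = model(input_ids).logits  # (batch, seq_len, vocab)
        next_id = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_id], dim=-1)
    return input_ids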
| 5 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 5 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Dict = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "ibert"
def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : List[str] = quant_mode
lowerCamelCase__ : int = force_dequant
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 5 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    """simple docstring"""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class _lowercase ( lowercase__):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCamelCase__ : List[str] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
def lowerCAmelCase ( self : int , __lowerCamelCase : Any ):
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" )
def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = min(int(num_inference_steps * strength ) , __lowerCamelCase )
lowerCamelCase__ : int = max(num_inference_steps - init_timestep , 0 )
lowerCamelCase__ : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]=None ):
'''simple docstring'''
if not isinstance(__lowerCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCamelCase )}" )
lowerCamelCase__ : List[Any] = image.to(device=__lowerCamelCase , dtype=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
lowerCamelCase__ : Optional[int] = init_latents.shape
lowerCamelCase__ : List[str] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase , dtype=__lowerCamelCase )
# get latents
print("add noise to latents at timestep" , __lowerCamelCase )
lowerCamelCase__ : int = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : str = init_latents
return latents
@torch.no_grad()
def __call__( self : Tuple , __lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] = None , __lowerCamelCase : float = 0.8 , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ):
'''simple docstring'''
self.check_inputs(__lowerCamelCase )
# 2. Preprocess image
lowerCamelCase__ : List[Any] = preprocess(__lowerCamelCase )
# 3. set timesteps
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.get_timesteps(__lowerCamelCase , __lowerCamelCase , self.device )
lowerCamelCase__ : List[str] = timesteps[:1].repeat(__lowerCamelCase )
# 4. Prepare latent variables
lowerCamelCase__ : Optional[Any] = self.prepare_latents(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.unet.dtype , self.device , __lowerCamelCase )
lowerCamelCase__ : Tuple = latents
# 5. Denoising loop
for t in self.progress_bar(__lowerCamelCase ):
# 1. predict noise model_output
lowerCamelCase__ : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCamelCase__ : Union[str, Any] = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , eta=__lowerCamelCase , use_clipped_model_output=__lowerCamelCase , generator=__lowerCamelCase , ).prev_sample
lowerCamelCase__ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase__ : int = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__lowerCamelCase )
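# A minimal sketch of the strength/timestep arithmetic above, assuming 50
# scheduler steps: strength=0.2 keeps only the final 10 timesteps, so the
# input image is only lightly noised and denoised.
def _timestep_window(num_inference_steps, strength):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps - t_start
assert _timestep_window(50, 0.2) == (40, 10)  # 10 of 50 steps actually run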
| 5 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
A : Union[str, Any] = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "roberta"
def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : List[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : Dict = layer_norm_eps
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : Any = use_cache
lowerCamelCase__ : int = classifier_dropout
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 5 | 1 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """simple docstring"""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
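# A minimal equivalent check using collections.Counter, assuming the same
# normalization (lowercase, whitespace removed) as check_anagrams above.
from collections import Counter
def check_anagrams_counter(first_str, second_str):
    def normalize(s):
        return s.lower().strip().replace(" ", "")
    return Counter(normalize(first_str)) == Counter(normalize(second_str))
assert check_anagrams_counter("Silent", "Listen")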
if __name__ == "__main__":
from doctest import testmod
testmod()
A : Dict = input("Enter the first string ").strip()
A : int = input("Enter the second string ").strip()
A : Union[str, Any] = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 5 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
"""simple docstring"""
A__ = field(
default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)})
A__ = field(
default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
A__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
A__ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
A__ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
A__ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"})
A__ = field(
default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
A__ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    """simple docstring"""
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """simple docstring"""
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ):
'''simple docstring'''
lowerCamelCase__ : List[str] = args
lowerCamelCase__ : Tuple = is_language_sensitive
        lowerCamelCase__ : int = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
lowerCamelCase__ : List[str] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase__ : str = mode
# Load data features from cache or dataset file
lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase__ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : List[str] = cached_features_file + ".lock"
with FileLock(__lowerCamelCase ):
if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache:
                start = time.time()
lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase__ : Optional[Any] = self.old_features["features"]
lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
if mode == Split.dev:
lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir )
lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , )
                start = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.features[i]
lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase__ : List[str] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
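# A minimal sketch of the FileLock-guarded caching pattern used in __init__
# above; cache_path and the build_features() callable are hypothetical.
def _cached_load(cache_path, overwrite_cache, build_features):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite_cache:
            return torch.load(cache_path)  # another process already built it
        features = build_features()
        torch.save(features, cache_path)  # later processes will hit the cache
        return features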
| 5 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : int=7 , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=33 , __lowerCamelCase : Any=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[int]=512 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : str=2 , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=None , ):
'''simple docstring'''
lowerCamelCase__ : str = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Union[str, Any] = seq_length
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Any = use_input_mask
lowerCamelCase__ : Union[str, Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Dict = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Dict = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : int = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Optional[int] = type_vocab_size
lowerCamelCase__ : int = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : Any = num_labels
lowerCamelCase__ : int = num_choices
lowerCamelCase__ : int = scope
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Dict = None
if self.use_input_mask:
lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : str = None
lowerCamelCase__ : str = None
if self.use_labels:
lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Any , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = EsmModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )
lowerCamelCase__ : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : str = EsmForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Union[str, Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_labels
lowerCamelCase__ : Optional[int] = EsmForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : List[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = False
A__ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
A__ = ()
A__ = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = EsmModelTester(self )
lowerCamelCase__ : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ : List[Any] = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[Any] = EsmModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase__ : List[str] = EsmEmbeddings(config=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase__ : Dict = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase__ : Any = create_position_ids_from_input_ids(__lowerCamelCase , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCamelCase , __lowerCamelCase ) ) )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase__ : Any = EsmEmbeddings(config=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = torch.empty(2 , 4 , 30 )
lowerCamelCase__ : Union[str, Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase__ : Optional[int] = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase__ : Union[str, Any] = embeddings.create_position_ids_from_inputs_embeds(__lowerCamelCase )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCamelCase , __lowerCamelCase ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
@require_torch
class _lowercase ( lowercase__):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
lowerCamelCase__ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ : int = model(__lowerCamelCase )[0]
lowerCamelCase__ : Dict = 33
lowerCamelCase__ : int = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __lowerCamelCase )
lowerCamelCase__ : Any = torch.tensor(
[[[8.9_2_1_5, -1_0.5_8_9_8, -6.4_6_7_1], [-6.3_9_6_7, -1_3.9_1_1_4, -1.1_2_1_2], [-7.7_8_1_2, -1_3.9_5_1_6, -3.7_4_0_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
lowerCamelCase__ : int = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
lowerCamelCase__ : Optional[int] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )[0]
# compare the actual values for a slice.
lowerCamelCase__ : Optional[Any] = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
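# A minimal sketch of the padding-aware position-id rule the first test
# above verifies: padded slots keep padding_idx, real tokens get cumulative
# positions offset past it.
def _sketch_position_ids(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1) * mask).long() + padding_idx
# e.g. padding_idx=1: [[12, 31, 13, 1]] -> [[2, 3, 4, 1]]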
| 5 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
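# A minimal demonstration of the two helpers above: bytes_to_unicode is a
# reversible byte -> printable-unicode map covering all 256 byte values, and
# get_pairs lists the adjacent symbol pairs that BPE merges in bpe_ranks order.
def _bpe_helpers_demo():
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256
    assert get_pairs(tuple("low")) == {("l", "o"), ("o", "w")}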
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ["input_ids", "attention_mask"]
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase )
lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding
lowerCamelCase__ : List[Any] = bytes_to_unicode()
lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1]
lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase )
lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram
lowerCamelCase__ : int = []
lowerCamelCase__ : int = 0
while i < len(__lowerCamelCase ):
try:
lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : List[str] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : Dict = tuple(__lowerCamelCase )
lowerCamelCase__ : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase )
lowerCamelCase__ : Dict = word
return word
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = []
for token in re.findall(self.pat , __lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase ( self : Any , __lowerCamelCase : int ):
'''simple docstring'''
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.decoder.get(__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase )
lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase__ : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ : Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
lowerCamelCase__ : Tuple = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
lowerCamelCase__ : List[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
lowerCamelCase__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowerCamelCase__ : Any = [self.sep_token_id]
lowerCamelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
lowerCamelCase__ : Dict = " " + text
return (text, kwargs)
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ : str = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ : Optional[int] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
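# A minimal sketch of the global-attention padding rule in _pad above: -1
# marks padded positions because 0 already means "local attention" for LED.
def _pad_global_attention(mask, target_len, padding_side="right"):
    diff = target_len - len(mask)
    return mask + [-1] * diff if padding_side == "right" else [-1] * diff + mask
assert _pad_global_attention([1, 0, 0], 5) == [1, 0, 0, -1, -1]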
| 5 | 1 |
from __future__ import annotations
def print_distance(distance: list[float], src: int):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
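# A minimal usage sketch of bellman_ford on a hard-coded graph (values are
# illustrative): the negative edge 1 -> 2 makes the indirect path cheaper.
def _bellman_ford_demo():
    example_graph = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 5},
        {"src": 1, "dst": 2, "weight": -3},
    ]
    # 0 -> 1 costs 4, and 0 -> 1 -> 2 costs 4 + (-3) = 1, beating the direct 5
    assert bellman_ford(example_graph, 3, 3, 0) == [0.0, 4.0, 1.0]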
| 700 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
    A__ = KandinskyV22Img2ImgPipeline
A__ = ["image_embeds", "negative_image_embeds", "image"]
A__ = [
"image_embeds",
"negative_image_embeds",
"image",
]
A__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
A__ = False
@property
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.dummy_unet
lowerCamelCase__ : Optional[Any] = self.dummy_movq
lowerCamelCase__ : Optional[int] = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int=0 ):
'''simple docstring'''
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create init_image
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase__ : Optional[int] = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) )
if str(__lowerCamelCase ).startswith("mps" ):
lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase )
else:
lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Dict = "cpu"
lowerCamelCase__ : str = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase )
lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
lowerCamelCase__ : List[str] = output.images
lowerCamelCase__ : Optional[Any] = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : str = np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
lowerCamelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCamelCase__ : Any = "A red cartoon frog, 4k"
lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowerCamelCase__ : str = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCamelCase__ : Optional[Any] = pipeline(
image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
lowerCamelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 5 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDEN_SIZE_MAPPING = {
"169M": 768,
"430M": 1024,
"1B5": 2048,
"3B": 2560,
"7B": 4096,
"14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
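
# Illustrative sketch (not part of the CLI flow above): the renaming rules in
# convert_state_dict applied to a toy state dict with dummy tensors.
_toy_state_dict = {
    "emb.weight": torch.zeros(1),
    "blocks.0.ln0.weight": torch.zeros(1),
    "blocks.3.att.time_mix_k": torch.zeros(1),
    "head.weight": torch.zeros(1),
}
assert sorted(convert_state_dict(_toy_state_dict)) == [
    "head.weight",
    "rwkv.blocks.0.pre_ln.weight",
    "rwkv.blocks.3.attention.time_mix_key",
    "rwkv.embeddings.weight",
]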
| 701 |
def binary_xor(a: int, b: int):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
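
    # Illustrative usage: 25 is 0b11001 and 32 is 0b100000, so their XOR is 0b111001.
    print(binary_xor(25, 32))  # 0b111001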
| 5 | 0 |
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
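
# Illustrative usage of the inspected APIs (needs network access to the Hub,
# hence the integration marker on the tests above):
#   >>> get_dataset_config_names("squad")
#   ['plain_text']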
| 702 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 5 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
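
# Illustrative usage of the config as reconstructed above: hidden_size is
# derived as embed_dim * 2 ** (num_stages - 1), i.e. 96 * 8 = 768 by default.
#   >>> config = Swinv2Config()
#   >>> config.num_layers, config.hidden_size
#   (4, 768)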
| 703 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
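
    # Illustrative timing sketch (dates made up): an issue last updated 24 days
    # ago crosses the 23-day notification window used above.
    #   >>> from datetime import timedelta
    #   >>> (dt.utcnow() - (dt.utcnow() - timedelta(days=24))).days > 23
    #   True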
| 5 | 0 |
from __future__ import annotations

import math


# for calculating u value
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
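

# Illustrative non-interactive run of the same scheme (made-up table): the
# points (0, 1), (1, 2), (2, 4) lie on 0.5*x**2 + 0.5*x + 1, so interpolating
# at 1.5 should give 2.875.
def _demo() -> None:
    x = [0, 1, 2]
    y = [[1.0, 0.0, 0.0], [2.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
    n, value = len(x), 1.5
    u = (value - x[0]) / (x[1] - x[0])
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(summ)  # 2.875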
| 704 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
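
# Illustrative usage (a Caesar cipher with shift 7 is recovered from letter
# frequencies alone):
#   >>> decrypt_caesar_with_chi_squared(
#   ...     "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
#   ... )[0]
#   7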
| 5 | 0 |
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
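# Round-trip example in the same spirit as the tests above:
# crypt = XORCipher()
# assert crypt.decrypt_string(crypt.encrypt_string("hallo welt", 67), 67) == "hallo welt"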
| 705 |
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
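
    # Illustrative checks: 5**2 = 25 and 76**2 = 5776 both end in the original
    # number, while 7**2 = 49 does not.
    assert is_automorphic_number(5) and is_automorphic_number(76)
    assert not is_automorphic_number(7)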
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = ["PoolFormerFeatureExtractor"]
A : int = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
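
# Illustrative sketch (not the actual _LazyModule implementation): the same
# deferred-import idea expressed with PEP 562 module-level __getattr__.
#   import importlib
#   def __getattr__(name):
#       module_name = next(m for m, syms in _import_structure.items() if name in syms)
#       return getattr(importlib.import_module("." + module_name, __name__), name)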
| 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _lowercase ( __A):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : UNetaDModel , __lowerCamelCase : UNetaDModel , __lowerCamelCase : DDPMScheduler , __lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : str = value_function
lowerCamelCase__ : Union[str, Any] = unet
lowerCamelCase__ : List[str] = scheduler
lowerCamelCase__ : Optional[Any] = env
lowerCamelCase__ : Any = env.get_dataset()
lowerCamelCase__ : Tuple = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Any = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Tuple = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = env.observation_space.shape[0]
lowerCamelCase__ : Dict = env.action_space.shape[0]
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def lowerCAmelCase ( self : str , __lowerCamelCase : str ):
'''simple docstring'''
if type(__lowerCamelCase ) is dict:
return {k: self.to_torch(__lowerCamelCase ) for k, v in x_in.items()}
elif torch.is_tensor(__lowerCamelCase ):
return x_in.to(self.unet.device )
return torch.tensor(__lowerCamelCase , device=self.unet.device )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Dict = val.clone()
return x_in
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = x.shape[0]
lowerCamelCase__ : str = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : List[Any] = torch.full((batch_size,) , __lowerCamelCase , device=self.unet.device , dtype=torch.long )
for _ in range(__lowerCamelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : Dict = self.value_function(x.permute(0 , 2 , 1 ) , __lowerCamelCase ).sample
lowerCamelCase__ : List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
lowerCamelCase__ : Union[str, Any] = self.scheduler._get_variance(__lowerCamelCase )
lowerCamelCase__ : Any = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : str = model_std * grad
lowerCamelCase__ : Any = 0
lowerCamelCase__ : List[Any] = x.detach()
lowerCamelCase__ : Optional[Any] = x + scale * grad
lowerCamelCase__ : Any = self.reset_xa(__lowerCamelCase , __lowerCamelCase , self.action_dim )
lowerCamelCase__ : Dict = self.unet(x.permute(0 , 2 , 1 ) , __lowerCamelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , predict_epsilon=__lowerCamelCase )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Tuple = self.reset_xa(__lowerCamelCase , __lowerCamelCase , self.action_dim )
lowerCamelCase__ : Any = self.to_torch(__lowerCamelCase )
return x, y
def __call__( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str]=64 , __lowerCamelCase : Dict=32 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : List[str]=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.normalize(__lowerCamelCase , "observations" )
lowerCamelCase__ : Dict = obs[None].repeat(__lowerCamelCase , axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(__lowerCamelCase )}
lowerCamelCase__ : Any = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : Tuple = randn_tensor(__lowerCamelCase , device=self.unet.device )
lowerCamelCase__ : Optional[Any] = self.reset_xa(__lowerCamelCase , __lowerCamelCase , self.action_dim )
lowerCamelCase__ : Dict = self.to_torch(__lowerCamelCase )
# run the diffusion process
lowerCamelCase__ : Tuple = self.run_diffusion(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# sort output trajectories by value
lowerCamelCase__ : int = y.argsort(0 , descending=__lowerCamelCase ).squeeze()
lowerCamelCase__ : Union[str, Any] = x[sorted_idx]
lowerCamelCase__ : Optional[int] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : str = actions.detach().cpu().numpy()
lowerCamelCase__ : List[Any] = self.de_normalize(__lowerCamelCase , key="actions" )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : List[str] = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : List[str] = np.random.randint(0 , __lowerCamelCase )
lowerCamelCase__ : List[Any] = denorm_actions[selected_index, 0]
return denorm_actions
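
# Illustrative sketch of the (de)normalization pair used above: a z-score with
# dataset statistics, inverted exactly by de_normalize (values made up).
#   >>> mean, std = 1.0, 2.0
#   >>> z = (5.0 - mean) / std
#   >>> z * std + mean
#   5.0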
| 707 |
from __future__ import annotations
import time
import numpy as np
A : Dict = [8, 5, 9, 7]
A : Optional[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A : Any = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class _lowercase :
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ):
'''simple docstring'''
lowerCamelCase__ : int = claim_vector
lowerCamelCase__ : str = allocated_resources_table
lowerCamelCase__ : int = maximum_claim_table
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.__need()
lowerCamelCase__ : str = self.__allocated_resources_table
lowerCamelCase__ : List[Any] = self.__available_resources()
lowerCamelCase__ : str = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
lowerCamelCase__ : int = False
for each_need in need_list:
lowerCamelCase__ : Dict = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
lowerCamelCase__ : str = False
break
if execution:
lowerCamelCase__ : Tuple = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
lowerCamelCase__ : Any = original_need_index
print(f"Process {process_number + 1} is executing." )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"
+ " ".join(f"{it:>8}" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"
+ " ".join(f"{it:>8}" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
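
    # Illustrative sketch of the "need" computation the class performs, using
    # the first rows of the tables above: need = maximum_claim - allocated.
    print(list(np.array([3, 2, 1, 4]) - np.array([2, 0, 1, 1])))  # [1, 2, 0, 3]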
| 5 | 0 |
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
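
    # Illustrative usage: -5 in 4 bits is 0b1011; -17 in 6 bits is 0b101111.
    print(twos_complement(-5))   # 0b1011
    print(twos_complement(-17))  # 0b101111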
| 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = BarthezTokenizer
A__ = BarthezTokenizerFast
A__ = True
A__ = True
def lowerCAmelCase ( self : int ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase )
lowerCamelCase__ : Tuple = tokenizer
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Any = "<pad>"
lowerCamelCase__ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__lowerCamelCase ) , 101122 )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2]
lowerCamelCase__ : Tuple = self.tokenizer(
__lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
lowerCamelCase__ : Any = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : Tuple = self.get_rust_tokenizer()
lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé."
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : List[str] = self.get_rust_tokenizer()
lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase )
lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowerCamelCase__ : List[str] = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
| 5 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
A : str = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
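
# Illustrative sketch: for a 480x640 input targeted at 384x384 with
# keep_aspect_ratio=True and multiple=32, the milder rescale (0.8) wins and
# both sides stay multiples of 32.
#   >>> import numpy as np
#   >>> image = np.zeros((480, 640, 3), dtype=np.uint8)
#   >>> get_resize_output_image_size(image, (384, 384), keep_aspect_ratio=True, multiple=32)
#   (384, 512)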
class _lowercase ( lowercase__):
A__ = ["pixel_values"]
def __init__( self : Tuple , __lowerCamelCase : List[str] = True , __lowerCamelCase : Any = None , __lowerCamelCase : int = PILImageResampling.BILINEAR , __lowerCamelCase : Optional[Any] = False , __lowerCamelCase : Optional[Any] = 1 , __lowerCamelCase : Any = True , __lowerCamelCase : Dict = 1 / 255 , __lowerCamelCase : Tuple = True , __lowerCamelCase : str = None , __lowerCamelCase : Optional[Any] = None , **__lowerCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
lowerCamelCase__ : Optional[Any] = size if size is not None else {"height": 384, "width": 384}
lowerCamelCase__ : int = get_size_dict(UpperCamelCase_ )
lowerCamelCase__ : str = do_resize
lowerCamelCase__ : List[str] = size
lowerCamelCase__ : Dict = keep_aspect_ratio
lowerCamelCase__ : Optional[Any] = ensure_multiple_of
lowerCamelCase__ : List[Any] = resample
lowerCamelCase__ : str = do_rescale
lowerCamelCase__ : str = rescale_factor
lowerCamelCase__ : Tuple = do_normalize
lowerCamelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase__ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] = False , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : Union[str, Any] = PILImageResampling.BICUBIC , __lowerCamelCase : List[str] = None , **__lowerCamelCase : Any , ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
lowerCamelCase__ : Optional[Any] = get_resize_output_image_size(
UpperCamelCase_ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=UpperCamelCase_ , multiple=UpperCamelCase_ , )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : str = None , **__lowerCamelCase : List[str] , ):
'''simple docstring'''
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] = None , **__lowerCamelCase : str , ):
'''simple docstring'''
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] = None , __lowerCamelCase : str = None , __lowerCamelCase : Any = None , __lowerCamelCase : Dict = None , __lowerCamelCase : List[Any] = None , __lowerCamelCase : Any = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Any] = None , __lowerCamelCase : List[Any] = None , __lowerCamelCase : str = None , __lowerCamelCase : int = None , __lowerCamelCase : Any = ChannelDimension.FIRST , **__lowerCamelCase : Union[str, Any] , ):
'''simple docstring'''
lowerCamelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : List[str] = size if size is not None else self.size
lowerCamelCase__ : Any = get_size_dict(UpperCamelCase_ )
lowerCamelCase__ : List[str] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCamelCase__ : Dict = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCamelCase__ : Any = resample if resample is not None else self.resample
lowerCamelCase__ : Any = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : List[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : int = image_std if image_std is not None else self.image_std
lowerCamelCase__ : int = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCamelCase__ : Optional[int] = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
lowerCamelCase__ : str = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
lowerCamelCase__ : Tuple = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
lowerCamelCase__ : List[str] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
lowerCamelCase__ : Dict = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
lowerCamelCase__ : List[Any] = {"pixel_values": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Dict = None ):
'''simple docstring'''
lowerCamelCase__ : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCamelCase_ ):
lowerCamelCase__ : List[str] = target_sizes.numpy()
lowerCamelCase__ : Optional[int] = []
for idx in range(len(UpperCamelCase_ ) ):
lowerCamelCase__ : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCamelCase_ )
lowerCamelCase__ : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase_ )
else:
lowerCamelCase__ : int = logits.argmax(dim=1 )
lowerCamelCase__ : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 709 |
import cva
import numpy as np
class _lowercase :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : float , __lowerCamelCase : int ):
'''simple docstring'''
if k in (0.0_4, 0.0_6):
lowerCamelCase__ : int = k
lowerCamelCase__ : List[str] = window_size
else:
raise ValueError("invalid k value" )
def __str__( self : str ):
'''simple docstring'''
return str(self.k )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : str ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(__lowerCamelCase , 0 )
lowerCamelCase__ , lowerCamelCase__ : Any = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : List[Any] = img.copy()
lowerCamelCase__ : int = cva.cvtColor(__lowerCamelCase , cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : int = np.gradient(__lowerCamelCase )
lowerCamelCase__ : Dict = dx**2
lowerCamelCase__ : Optional[Any] = dy**2
lowerCamelCase__ : int = dx * dy
        lowerCamelCase__ : Union[str, Any] = self.k  # honor the k validated in __init__ instead of re-hardcoding 0.04
lowerCamelCase__ : Any = self.window_size // 2
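        # For every pixel, the gradient products are summed over the window to form the
        # structure tensor M = [[wxx, wxy], [wxy, wyy]]; the Harris response is then
        # R = det(M) - k * trace(M)**2.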
for y in range(__lowerCamelCase , h - offset ):
for x in range(__lowerCamelCase , w - offset ):
lowerCamelCase__ : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : List[str] = wxx + wyy
lowerCamelCase__ : List[Any] = det - k * (trace**2)
                # Threshold on the Harris response; 0.5 is an arbitrary cutoff that can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
A : Tuple = HarrisCorner(0.0_4, 3)
A, A : Optional[int] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 5 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
A : Optional[int] = logging.get_logger(__name__)
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
A__ = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
A__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"})
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.task_name.lower()
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "train"
A__ = "dev"
A__ = "test"
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = 42
A__ = 42
A__ = 42
def __init__( self : Any , __lowerCamelCase : GlueDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizerBase , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[str] = None , ):
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , __A , )
lowerCamelCase__ : int = args
lowerCamelCase__ : int = glue_processors[args.task_name]()
lowerCamelCase__ : int = glue_output_modes[args.task_name]
if isinstance(__A , __A ):
try:
lowerCamelCase__ : Optional[Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
lowerCamelCase__ : Optional[int] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" , )
lowerCamelCase__ : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ : Dict = label_list[2], label_list[1]
lowerCamelCase__ : Dict = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
lowerCamelCase__ : Any = time.time()
lowerCamelCase__ : Dict = torch.load(__A )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
else:
logger.info(f"Creating features from dataset file at {args.data_dir}" )
if mode == Split.dev:
lowerCamelCase__ : Any = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase__ : Optional[int] = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase__ : int = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase__ : Union[str, Any] = examples[:limit_length]
lowerCamelCase__ : List[str] = glue_convert_examples_to_features(
__A , __A , max_length=args.max_seq_length , label_list=__A , output_mode=self.output_mode , )
lowerCamelCase__ : int = time.time()
torch.save(self.features , __A )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : str ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : str , __lowerCamelCase : str ):
'''simple docstring'''
return self.features[i]
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return self.label_list
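    # Usage sketch (assumes a local GLUE task dump, e.g. data_dir pointing at the MRPC
    # tsv files): dataset = GlueDataset(args, tokenizer); indexing yields InputFeatures
    # that plug straight into a PyTorch DataLoader.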
| 710 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowercase :
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Any = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : int = use_input_mask
lowerCamelCase__ : List[str] = use_token_type_ids
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : List[Any] = embedding_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_hidden_groups
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : Optional[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : str = num_labels
lowerCamelCase__ : List[Any] = num_choices
lowerCamelCase__ : Any = scope
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[int] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : int = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Union[str, Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : str = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_labels
lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_choices
lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
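        # Multiple-choice heads expect (batch, num_choices, seq_len) inputs, so each
        # (batch, seq_len) tensor is repeated along a new choice dimension below.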
lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : int = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : int = self.prepare_config_and_inputs()
        ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : Union[str, Any] = config_and_inputs
lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ):
'''simple docstring'''
lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase )
lowerCamelCase__ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = AlbertModelTester(self )
lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ : Dict = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" )
lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
lowerCamelCase__ : Dict = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
| 5 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( UpperCamelCase_ , unittest.TestCase):
"""simple docstring"""
A__ = CodeGenTokenizer
A__ = CodeGenTokenizerFast
A__ = True
A__ = {"""add_prefix_space""": True}
A__ = False
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
lowerCamelCase__ : Any = dict(zip(_a , range(len(_a ) ) ) )
lowerCamelCase__ : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCamelCase__ : Any = {"""unk_token""": """<unk>"""}
lowerCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_a ) )
def lowerCAmelCase ( self : Any , **__lowerCamelCase : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_a )
def lowerCAmelCase ( self : Tuple , **__lowerCamelCase : Optional[int] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : List[str] = """lower newer"""
lowerCamelCase__ : Optional[int] = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase__ : Tuple = """lower newer"""
lowerCamelCase__ : List[Any] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowerCamelCase__ : Optional[int] = tokenizer.tokenize(_a , add_prefix_space=_a )
self.assertListEqual(_a , _a )
lowerCamelCase__ : Tuple = tokens + [tokenizer.unk_token]
lowerCamelCase__ : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : int = self.get_tokenizer()
lowerCamelCase__ : str = self.get_rust_tokenizer(add_prefix_space=_a )
lowerCamelCase__ : str = """lower newer"""
# Testing tokenization
lowerCamelCase__ : Optional[int] = tokenizer.tokenize(_a , add_prefix_space=_a )
lowerCamelCase__ : int = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids without special tokens
lowerCamelCase__ : int = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
lowerCamelCase__ : Union[str, Any] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids with special tokens
lowerCamelCase__ : Tuple = self.get_rust_tokenizer(add_prefix_space=_a )
lowerCamelCase__ : List[str] = tokenizer.encode(_a , add_prefix_space=_a )
lowerCamelCase__ : str = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# Testing the unknown token
lowerCamelCase__ : List[str] = tokens + [rust_tokenizer.unk_token]
lowerCamelCase__ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a ) , _a )
def lowerCAmelCase ( self : Tuple , *__lowerCamelCase : List[Any] , **__lowerCamelCase : int ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : int=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase__ : Any = self.rust_tokenizer_class.from_pretrained(_a , **_a )
# Simple input
lowerCamelCase__ : int = """This is a simple input"""
lowerCamelCase__ : Optional[Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
lowerCamelCase__ : Optional[Any] = ("""This is a simple input""", """This is a pair""")
lowerCamelCase__ : int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" )
# Simple input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" )
# Simple input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , )
# Pair input
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" )
# Pair input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" )
# Pair input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowerCamelCase__ : Dict = """This is a simple input"""
lowerCamelCase__ : Optional[Any] = ["""This is a simple input looooooooong""", """This is a simple input"""]
lowerCamelCase__ : List[str] = ("""This is a simple input""", """This is a pair""")
lowerCamelCase__ : List[str] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
lowerCamelCase__ : List[Any] = tokenizer.pad_token_id
lowerCamelCase__ : List[Any] = tokenizer(_a , padding="max_length" , max_length=30 , return_tensors="np" )
lowerCamelCase__ : List[Any] = tokenizer(_a , padding=_a , truncate=_a , return_tensors="np" )
lowerCamelCase__ : List[str] = tokenizer(*_a , padding="max_length" , max_length=60 , return_tensors="np" )
lowerCamelCase__ : Tuple = tokenizer(_a , padding=_a , truncate=_a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = """$$$"""
lowerCamelCase__ : Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_a , add_bos_token=_a )
lowerCamelCase__ : Union[str, Any] = """This is a simple input"""
lowerCamelCase__ : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
lowerCamelCase__ : Optional[Any] = tokenizer.bos_token_id
lowerCamelCase__ : List[Any] = tokenizer(_a )
lowerCamelCase__ : Tuple = tokenizer(_a )
self.assertEqual(out_s.input_ids[0] , _a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase__ : Union[str, Any] = tokenizer.decode(out_s.input_ids )
lowerCamelCase__ : Dict = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowerCamelCase__ : Tuple = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
lowerCamelCase__ : Tuple = """\nif len_a > len_b: result = a\nelse: result = b"""
lowerCamelCase__ : List[Any] = tokenizer.encode(_a )
lowerCamelCase__ : Dict = ["""^#""", re.escape("<|endoftext|>" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
lowerCamelCase__ : int = tokenizer.decode(_a , truncate_before_pattern=_a )
self.assertEqual(_a , _a )
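        # CodeGen-specific behavior: decode(..., truncate_before_pattern=...) cuts the
        # text at the first regex match (comments, docstrings, or blank-line runs here),
        # trimming unrelated trailing completions from sampled code.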
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
| 711 |
import os
def lowercase_ ( _A : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file:
lowerCamelCase__ : List[Any] = [
[int(_A ) for element in line.split("," )]
for line in input_file.readlines()
]
lowerCamelCase__ : Optional[Any] = len(_A )
lowerCamelCase__ : Union[str, Any] = len(matrix[0] )
lowerCamelCase__ : Union[str, Any] = [[-1 for _ in range(_A )] for _ in range(_A )]
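    # "Path sum: three ways" (Project Euler 82): find the minimal path sum moving
    # right, up, or down. Each column j is solved from column j-1 with a downward
    # relaxation pass followed by an upward pass, covering all vertical detours.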
for i in range(_A ):
lowerCamelCase__ : Optional[Any] = matrix[i][0]
for j in range(1 , _A ):
for i in range(_A ):
lowerCamelCase__ : int = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _A ):
lowerCamelCase__ : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowerCamelCase__ : str = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'{solution() = }')
| 5 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def lowercase_ ( _A : Optional[int] , _A : Any ):
lowerCamelCase__ : Dict = Mock()
lowerCamelCase__ : str = conn, Mock()
lowerCamelCase__ : Optional[Any] = iter([1, None] )
lowerCamelCase__ : Union[str, Any] = lambda _A : next(__UpperCAmelCase )
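    # The mocked read() yields 1 (one data chunk) and then None (EOF), so the send
    # loop inside send_file runs exactly once before the connection is closed.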
# ===== invoke =====
send_file(filename="mytext.txt" , testing=__UpperCAmelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 712 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _lowercase ( datasets.Metric):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ):
'''simple docstring'''
lowerCamelCase__ : str = compute_bleu(
reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase )
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score
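        # compute_bleu returns (bleu, precisions, brevity_penalty, length_ratio,
        # translation_length, reference_length); the dict below just names those fields.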
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 5 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A : int = TypeVar("KEY")
A : Any = TypeVar("VAL")
@dataclass(frozen=lowercase__ , slots=lowercase__)
class _lowercase ( Generic[KEY, VAL]):
"""simple docstring"""
A__ = 42
A__ = 42
class _lowercase ( _Item):
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __bool__( self : List[Any] ):
'''simple docstring'''
return False
A : Any = _DeletedItem()
class _lowercase ( MutableMapping[KEY, VAL]):
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : int = 8 , __lowerCamelCase : float = 0.7_5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = initial_block_size
lowerCamelCase__ : int = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : Tuple = capacity_factor
lowerCamelCase__ : List[Any] = 0
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : KEY ):
'''simple docstring'''
return hash(_UpperCAmelCase ) % len(self._buckets )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : int ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
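    # Collision handling is open addressing with linear probing: _get_next_ind walks to
    # the following bucket, wrapping around, until a free slot or the key is found.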
def lowerCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : KEY , __lowerCamelCase : VAL ):
'''simple docstring'''
lowerCamelCase__ : str = self._buckets[ind]
if not stored:
lowerCamelCase__ : Optional[int] = _Item(_UpperCAmelCase , _UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : str = _Item(_UpperCAmelCase , _UpperCAmelCase )
return True
else:
return False
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets
lowerCamelCase__ : int = [None] * new_size
lowerCamelCase__ : str = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : KEY ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self._get_bucket_index(_UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Dict = self._get_next_ind(_UpperCAmelCase )
def lowerCAmelCase ( self : int , __lowerCamelCase : KEY , __lowerCamelCase : VAL ):
'''simple docstring'''
for ind in self._iterate_buckets(_UpperCAmelCase ):
if self._try_set(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
break
def __setitem__( self : List[str] , __lowerCamelCase : KEY , __lowerCamelCase : VAL ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(_UpperCAmelCase , _UpperCAmelCase )
def __delitem__( self : Any , __lowerCamelCase : KEY ):
'''simple docstring'''
for ind in self._iterate_buckets(_UpperCAmelCase ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
raise KeyError(_UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
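    # Deleted slots keep a tombstone (_deleted) instead of None so that probe chains
    # running through them stay intact for later lookups of other keys.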
def __getitem__( self : int , __lowerCamelCase : KEY ):
'''simple docstring'''
for ind in self._iterate_buckets(_UpperCAmelCase ):
lowerCamelCase__ : Dict = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_UpperCAmelCase )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self._len
def __iter__( self : Optional[Any] ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Any = " ,".join(
f"{item.key}: {item.val}" for item in self._buckets if item )
return f"HashMap({val_string})"
| 713 |
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A : Optional[int] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
A : int = BeautifulSoup(res.text, "html.parser")
A : Any = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 5 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowercase_ ( _A : Optional[int] , _A : List[str] ) -> str:
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowerCamelCase__ : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
lowerCamelCase__ : List[str] = torch.permute(__A , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__A ):
# linear layer
lowerCamelCase__ : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
lowerCamelCase__ : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase__ : Dict = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
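# Rationale for the transposes above: Flax stores dense kernels as (in, out) and
# expert kernels as (num_experts, in, out), while PyTorch Linear weights are (out, in),
# so kernels are transposed / permuted when renamed to "weight".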
def lowercase_ ( _A : List[Any] , _A : Optional[Any] , _A : List[str] ) -> int:
"""simple docstring"""
if "metadata" in layer:
lowerCamelCase__ : List[str] = layer.split("metadata" )
lowerCamelCase__ : List[str] = """""".join(split_layer[0] )[:-1]
lowerCamelCase__ : Tuple = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
lowerCamelCase__ : Union[str, Any] = layer.split("kvstore" )
lowerCamelCase__ : Any = """""".join(split_layer[0] )[:-1]
lowerCamelCase__ : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
lowerCamelCase__ : Any = layer.split("/" )
lowerCamelCase__ : Dict = """/""".join(split_layer[:-1] )
lowerCamelCase__ : List[str] = (split_layer[-1],)
if "kvstore/path" in layer:
lowerCamelCase__ : str = F"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
lowerCamelCase__ : Optional[int] = """file"""
else:
lowerCamelCase__ : Optional[int] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowercase_ ( _A : Union[str, Any] , _A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ : Tuple = rename_keys(__A )
lowerCamelCase__ : Optional[Any] = {}
for k, v in current_block.items():
lowerCamelCase__ : Optional[Any] = v
lowerCamelCase__ : Dict = new_current_block
torch.save(__A , __A )
def lowercase_ ( _A : Union[str, Any] , _A : Any , _A : Any , _A : str , _A : int = WEIGHTS_NAME ) -> Any:
"""simple docstring"""
lowerCamelCase__ : List[str] = convert_file_size_to_int(__A )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = {}
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Tuple = 0
os.makedirs(__A , exist_ok=__A )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
lowerCamelCase__ : Any = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
lowerCamelCase__ : Any = flatten_dict(__A , sep="/" )
lowerCamelCase__ : int = {}
for layer in checkpoint_info.keys():
lowerCamelCase__ : Union[str, Any] = get_key_and_tensorstore_dict(
__A , __A , __A )
if curr_real_layer_name in all_layers:
lowerCamelCase__ : Tuple = content
else:
lowerCamelCase__ : Tuple = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCamelCase__ : Dict = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowerCamelCase__ : Dict = torch.tensor(__A )
lowerCamelCase__ : Optional[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowerCamelCase__ : Optional[int] = rename_base_flax_keys(tuple(key.split("/" ) ) , __A )
lowerCamelCase__ : List[Any] = """/""".join(__A )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCamelCase__ : List[str] = os.path.join(
__A , weights_name.replace(".bin" , F"-{len(__A )+1:05d}-of-???.bin" ) )
rename_and_save_block(__A , __A )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = raw_weights.to(getattr(__A , __A ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCamelCase__ : Any = os.path.join(__A , weights_name.replace(".bin" , F"-{len(__A )+1:05d}-of-???.bin" ) )
rename_and_save_block(__A , __A )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__A ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCamelCase__ : int = {}
lowerCamelCase__ : List[str] = {}
for idx, shard in enumerate(__A ):
lowerCamelCase__ : Optional[Any] = weights_name.replace(
".bin" , F"-{idx+1:05d}-of-{len(__A ):05d}.bin" ) # len(sharded_state_dicts):05d}
lowerCamelCase__ : Tuple = os.path.join(__A , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) )
os.rename(__A , os.path.join(__A , __A ) )
lowerCamelCase__ : Optional[Any] = shard
for key in shard:
lowerCamelCase__ : List[str] = shard_file
# Add the metadata
lowerCamelCase__ : List[str] = {"""total_size""": total_size}
lowerCamelCase__ : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__A , __A ) , "w" , encoding="utf-8" ) as f:
lowerCamelCase__ : Optional[int] = json.dumps(__A , indent=2 , sort_keys=__A ) + """\n"""
f.write(__A )
return metadata, index
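# The returned index mirrors the WEIGHTS_INDEX_NAME layout: "metadata" records the
# total byte size and "weight_map" maps every parameter name to its shard file.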
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
A : str = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowercase_ ( ) -> List[str]:
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowerCamelCase__ : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
lowerCamelCase__ : Union[str, Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
lowerCamelCase__ : List[str] = TaTokenizer.from_pretrained("t5-small" )
lowerCamelCase__ : int = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
lowerCamelCase__ : Tuple = tokenizer(__A , return_tensors="pt" ).input_ids
lowerCamelCase__ : str = model.generate(__A , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 714 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Dict = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
lowerCamelCase__ : str = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
lowerCamelCase__ : Any = model(__lowerCamelCase )["last_hidden_state"]
lowerCamelCase__ : List[str] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice.
lowerCamelCase__ : str = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowercase :
"""simple docstring"""
@staticmethod
def lowerCAmelCase ( *__lowerCamelCase : int , **__lowerCamelCase : Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
A__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ObjectDetectionPipeline(model=_A , image_processor=_A )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowerCAmelCase ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(_A ) , 0 )
for detected_object in outputs:
self.assertEqual(
_A , {
"score": ANY(_A ),
"label": ANY(_A ),
"box": {"xmin": ANY(_A ), "ymin": ANY(_A ), "xmax": ANY(_A ), "ymax": ANY(_A )},
} , )
import datasets
lowerCamelCase__ : List[Any] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
lowerCamelCase__ : Optional[int] = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
lowerCamelCase__ : Dict = object_detector(_A , threshold=0.0 )
self.assertEqual(len(_A ) , len(_A ) )
for outputs in batch_outputs:
self.assertGreater(len(_A ) , 0 )
for detected_object in outputs:
self.assertEqual(
_A , {
"score": ANY(_A ),
"label": ANY(_A ),
"box": {"xmin": ANY(_A ), "ymin": ANY(_A ), "xmax": ANY(_A ), "ymax": ANY(_A )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
pass
@require_torch
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/tiny-detr-mobilenetsv3'
lowerCamelCase__ : Optional[int] = AutoModelForObjectDetection.from_pretrained(_A )
lowerCamelCase__ : int = AutoFeatureExtractor.from_pretrained(_A )
lowerCamelCase__ : Tuple = ObjectDetectionPipeline(model=_A , feature_extractor=_A )
lowerCamelCase__ : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
lowerCamelCase__ : Optional[Any] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'facebook/detr-resnet-50'
lowerCamelCase__ : Optional[int] = AutoModelForObjectDetection.from_pretrained(_A )
lowerCamelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(_A )
lowerCamelCase__ : List[str] = ObjectDetectionPipeline(model=_A , feature_extractor=_A )
lowerCamelCase__ : List[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
lowerCamelCase__ : Optional[Any] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def lowerCAmelCase ( self : str ):
'''simple docstring'''
        lowerCamelCase__ : List[str] = "facebook/detr-resnet-50"
lowerCamelCase__ : Dict = pipeline("object-detection" , model=_A )
lowerCamelCase__ : Optional[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
lowerCamelCase__ : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 0.9_9_8_5
        lowerCamelCase__ : str = "facebook/detr-resnet-50"
lowerCamelCase__ : Optional[int] = pipeline("object-detection" , model=_A )
lowerCamelCase__ : Optional[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=_A )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
        lowerCamelCase__ : Dict = "Narsil/layoutlmv3-finetuned-funsd"
lowerCamelCase__ : Tuple = 0.9_9_9_3
lowerCamelCase__ : str = pipeline("object-detection" , model=_A , threshold=_A )
lowerCamelCase__ : Any = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
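# A minimal end-user sketch of the pipeline exercised above; the threshold value
# here is illustrative rather than taken from the tests.
# object_detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# for detection in object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
#     print(detection["score"], detection["label"], detection["box"])  # box holds xmin/ymin/xmax/ymax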
| 715 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Any = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "blenderbot-small"
A__ = ["past_key_values"]
A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ):
'''simple docstring'''
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Optional[int] = encoder_ffn_dim
lowerCamelCase__ : Dict = encoder_layers
lowerCamelCase__ : Any = encoder_attention_heads
lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCamelCase__ : str = decoder_layers
lowerCamelCase__ : Optional[Any] = decoder_attention_heads
lowerCamelCase__ : List[str] = dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : Dict = activation_dropout
lowerCamelCase__ : Optional[Any] = activation_function
lowerCamelCase__ : Dict = init_std
lowerCamelCase__ : List[str] = encoder_layerdrop
lowerCamelCase__ : Dict = decoder_layerdrop
lowerCamelCase__ : int = use_cache
lowerCamelCase__ : List[Any] = encoder_layers
lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase__ : Union[str, Any] = {0: "batch"}
lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"}
lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Tuple = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
else:
lowerCamelCase__ : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Union[str, Any] = super().outputs
else:
lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Generate decoder inputs
lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1
lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape
lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1]
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads
lowerCamelCase__ : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Optional[int] = decoder_seq_length + 3
lowerCamelCase__ : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : List[Any] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 )
lowerCamelCase__ : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers
lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers
lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
) )
# TODO: test this.
lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__lowerCamelCase , __lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) )
return common_inputs
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCamelCase__ : str = seqlen + 2
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers
lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads
lowerCamelCase__ : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype
lowerCamelCase__ : List[str] = torch.cat(
[common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
lowerCamelCase__ : Tuple = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase )
]
return common_inputs
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase )
lowerCamelCase__ : Dict = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return common_inputs
def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
elif self.task == "causal-lm":
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
else:
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
return common_inputs
def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
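# A minimal sketch of the dummy past_key_values assembled above, assuming the
# config defaults captured in __init__ (d_model=512, 16 heads, 8 decoder layers):
# each layer receives a zero-filled (key, value) pair shaped
# (batch, num_heads, past_sequence_length, d_model // num_heads).
# import torch
# past_shape = (2, 16, 3, 512 // 16)
# past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(8)]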
| 5 | 0 |
from __future__ import annotations
import typing
from collections import Counter
def lowercase_ ( _A : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(_lowerCamelCase , max_perimeter + 1 ):
lowerCamelCase__ : Any = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(_lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowercase_ ( _A : Any = 1000 ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = pythagorean_triple(_lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
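# A sanity check of the helpers referenced in the calls above; the counts are
# easy to verify by hand.
# assert pythagorean_triple(12)[12] == 1    # only (3, 4, 5) has perimeter 12
# assert pythagorean_triple(120)[120] == 3  # (30, 40, 50), (24, 45, 51), (20, 48, 52)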
| 716 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[int] = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "xmod"
def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = position_embedding_type
lowerCamelCase__ : str = use_cache
lowerCamelCase__ : Union[str, Any] = classifier_dropout
lowerCamelCase__ : Any = pre_norm
lowerCamelCase__ : Tuple = adapter_reduction_factor
lowerCamelCase__ : Tuple = adapter_layer_norm
lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm
lowerCamelCase__ : Dict = ln_before_adapter
lowerCamelCase__ : List[Any] = list(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = default_language
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
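# A minimal usage sketch, assuming the classes above correspond to transformers'
# XmodConfig and its ONNX config; the keyword names mirror the attributes set in
# __init__.
# from transformers import XmodConfig
# config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
# assert config.model_type == "xmod" and config.default_language == "en_XX"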
| 5 | 0 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowercase_ ( _A : NDArray[floataa] , _A : NDArray[floataa] , _A : list[int] , _A : int , ):
"""simple docstring"""
    lowerCamelCase__ , lowerCamelCase__ : int = coefficient_matrix.shape
    lowerCamelCase__ , lowerCamelCase__ : str = constant_matrix.shape
    if rowsa != colsa:
        lowerCamelCase__ : str = F"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
        raise ValueError(a_ )
    if colsb != 1:
        lowerCamelCase__ : Optional[int] = F"Constant matrix must be nx1 but received {rowsb}x{colsb}"
        raise ValueError(a_ )
    if rowsa != rowsb:
        lowerCamelCase__ : List[Any] = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            F"received {rowsa}x{colsa} and {rowsb}x{colsb}"
        )
        raise ValueError(a_ )
    if len(a_ ) != rowsa:
        lowerCamelCase__ : List[str] = (
            "Number of initial values must be equal to number of rows in coefficient "
            F"matrix but received {len(a_ )} and {rowsa}"
        )
        raise ValueError(a_ )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
lowerCamelCase__ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
    lowerCamelCase__ , lowerCamelCase__ : List[Any] = table.shape
strictly_diagonally_dominant(a_ )
# Iterates the whole matrix for given number of times
for _ in range(a_ ):
lowerCamelCase__ : List[Any] = []
for row in range(a_ ):
lowerCamelCase__ : Optional[int] = 0
for col in range(a_ ):
if col == row:
lowerCamelCase__ : List[str] = table[row][col]
elif col == cols - 1:
lowerCamelCase__ : Tuple = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowerCamelCase__ : Optional[Any] = (temp + val) / denom
new_val.append(a_ )
lowerCamelCase__ : List[Any] = new_val
return [float(a_ ) for i in new_val]
def lowercase_ ( _A : NDArray[floataa] ):
"""simple docstring"""
    lowerCamelCase__ , lowerCamelCase__ : Tuple = table.shape
lowerCamelCase__ : Any = True
for i in range(0 , a_ ):
lowerCamelCase__ : Dict = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
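# A worked example of the solver above, assuming the de-obfuscated name
# `jacobi_iteration_method`; three sweeps from (0.5, -0.5, -0.5) were computed
# by hand and approach the exact solution (56/59, -66/59, -40/59).
# coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
# constant = np.array([[2.0], [-6.0], [-4.0]])
# jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3)
# -> [0.909375, -1.14375, -0.7484375]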
| 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowercase :
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : Union[str, Any] = is_training
lowerCamelCase__ : Any = use_token_type_ids
lowerCamelCase__ : Union[str, Any] = use_labels
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Optional[int] = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : List[Any] = num_choices
lowerCamelCase__ : Optional[Any] = scope
lowerCamelCase__ : List[Any] = self.vocab_size - 1
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Any = None
lowerCamelCase__ : str = None
lowerCamelCase__ : str = None
if self.use_labels:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase )
lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.num_labels
lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
lowerCamelCase__ : Union[str, Any] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
A__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
A__ = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ):
'''simple docstring'''
lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , )
lowerCamelCase__ : Tuple = inputs_dict["labels"]
lowerCamelCase__ : Any = inputs_dict["labels"]
lowerCamelCase__ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(__lowerCamelCase )
lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is
lowerCamelCase__ : Union[str, Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase )
self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
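# A minimal text-side sketch of the generation check above, assuming the usual
# tokenizer pairing; the decode call is illustrative.
# from transformers import OpenAIGPTTokenizer
# tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
# input_ids = tokenizer("the president is", return_tensors="pt").input_ids.to(torch_device)
# output_ids = model.generate(input_ids, do_sample=False)
# print(tokenizer.decode(output_ids[0]))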
| 5 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Dict=3 , __lowerCamelCase : int=18 , __lowerCamelCase : int=30 , __lowerCamelCase : Dict=400 , __lowerCamelCase : int=True , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = size if size is not None else {"shortest_edge": 18}
lowerCamelCase__ : List[str] = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : List[Any] = num_channels
lowerCamelCase__ : List[str] = image_size
lowerCamelCase__ : Any = min_resolution
lowerCamelCase__ : Optional[int] = max_resolution
lowerCamelCase__ : int = do_resize
lowerCamelCase__ : Dict = size
lowerCamelCase__ : Optional[int] = do_center_crop
lowerCamelCase__ : int = crop_size
lowerCamelCase__ : Any = do_normalize
lowerCamelCase__ : Optional[Any] = image_mean
lowerCamelCase__ : List[str] = image_std
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
A__ = LevitImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = LevitImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(__UpperCamelCase , "size" ) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCamelCase__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : Dict = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : List[Any] = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : Any = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
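# A minimal end-user sketch; the checkpoint name below is assumed, and the crop
# size comes from the checkpoint's preprocessor config rather than this tester.
# image_processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
# pixel_values = image_processor(images=Image.open("cat.png"), return_tensors="pt").pixel_values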
| 718 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Dict = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "ibert"
def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : List[str] = quant_mode
lowerCamelCase__ : int = force_dequant
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
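# A minimal usage sketch, assuming the class above corresponds to transformers'
# IBertConfig; quant_mode switches the model to integer-only operations.
# from transformers import IBertConfig, IBertModel
# config = IBertConfig(quant_mode=True)
# model = IBertModel(config)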
| 5 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A : Dict = get_tests_dir("fixtures")
A : str = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
A : Any = get_tests_dir("fixtures/dummy-config.json")
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 0
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Dict = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
lowerCamelCase__ : str = AutoFeatureExtractor.from_pretrained(snake_case_ ).to_dict()
config_dict.pop("feature_extractor_type" )
lowerCamelCase__ : Optional[int] = WavaVecaFeatureExtractor(**snake_case_ )
# save in new folder
model_config.save_pretrained(snake_case_ )
config.save_pretrained(snake_case_ )
lowerCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(snake_case_ )
# make sure private variable is not incorrectly saved
lowerCamelCase__ : List[str] = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case_ , "bert-base is not a local folder and is not a valid model identifier" ):
lowerCamelCase__ : int = AutoFeatureExtractor.from_pretrained("bert-base" )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case_ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowerCamelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(snake_case_ , revision="aaaaaa" )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
lowerCamelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
with self.assertRaises(snake_case_ ):
lowerCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case_ ):
lowerCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case_ )
lowerCamelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(snake_case_ )
lowerCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(snake_case_ , trust_remote_code=snake_case_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
try:
AutoConfig.register("custom" , snake_case_ )
AutoFeatureExtractor.register(snake_case_ , snake_case_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case_ ):
AutoFeatureExtractor.register(snake_case_ , snake_case_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase__ : Any = CustomFeatureExtractor.from_pretrained(snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(snake_case_ )
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str ):
'''simple docstring'''
class _lowercase ( _a):
"""simple docstring"""
A__ = True
try:
AutoConfig.register("custom" , snake_case_ )
AutoFeatureExtractor.register(snake_case_ , snake_case_ )
# If remote code is not set, the default is to use local
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase__ : str = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(snake_case_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 719 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
A : Union[str, Any] = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "roberta"
def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : List[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : Dict = layer_norm_eps
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : Any = use_cache
lowerCamelCase__ : int = classifier_dropout
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
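# A minimal sketch of the dynamic axes exposed above, assuming the classes
# correspond to transformers' RobertaConfig and RobertaOnnxConfig.
# from transformers import RobertaConfig
# from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig
# onnx_config = RobertaOnnxConfig(RobertaConfig())
# print(onnx_config.inputs)  # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ...])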
| 5 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowercase_ ( _A : str ):
"""simple docstring"""
lowerCamelCase__ : str = analyze_text(__UpperCamelCase )
lowerCamelCase__ : Any = list(" " + ascii_lowercase )
# what is our total sum of probabilities.
lowerCamelCase__ : str = sum(single_char_strings.values() )
# one length string
lowerCamelCase__ : Tuple = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : str = single_char_strings[ch]
lowerCamelCase__ : List[Any] = my_str / all_sum
my_fir_sum += prob * math.loga(__UpperCamelCase ) # entropy formula.
# print entropy
print(F"{round(-1 * my_fir_sum ):.1f}" )
# two len string
lowerCamelCase__ : str = sum(two_char_strings.values() )
lowerCamelCase__ : int = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            lowerCamelCase__ : str = cha + chb
if sequence in two_char_strings:
lowerCamelCase__ : Any = two_char_strings[sequence]
lowerCamelCase__ : Optional[int] = int(__UpperCamelCase ) / all_sum
my_sec_sum += prob * math.loga(__UpperCamelCase )
# print second entropy
print(F"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def lowercase_ ( _A : str ):
"""simple docstring"""
lowerCamelCase__ : str = Counter() # type: ignore
lowerCamelCase__ : int = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(__UpperCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowercase_ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
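# A hedged sanity note on calculate_prob, referenced in the comment block above:
# for a single repeated character such as "aaaaaaa" the first printed (one-gram)
# entropy is 0.0, while ordinary English prose lands near 4 bits for single
# characters and lower for character pairs, which are more predictable.
# calculate_prob("aaaaaaa")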
| 720 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
A : Union[str, Any] = logging.get_logger(__name__)
A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(
default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)})
A__ = field(
default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
A__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
A__ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
A__ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
A__ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"})
A__ = field(
default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
A__ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"})
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "train"
A__ = "dev"
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = 42
A__ = 42
A__ = 42
A__ = 42
def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ):
'''simple docstring'''
lowerCamelCase__ : List[str] = args
lowerCamelCase__ : Tuple = is_language_sensitive
lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
lowerCamelCase__ : List[str] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase__ : str = mode
# Load data features from cache or dataset file
lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase__ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : List[str] = cached_features_file + ".lock"
with FileLock(__lowerCamelCase ):
if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache:
lowerCamelCase__ : str = time.time()
lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase__ : Optional[Any] = self.old_features["features"]
lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
if mode == Split.dev:
lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir )
lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , )
lowerCamelCase__ : int = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.features[i]
lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase__ : List[str] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
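# A minimal sketch of the cache-under-lock pattern used in __init__ above:
# one process builds and saves the features while the others wait on the lock
# and then load from cache. `cache_path` and `build_features` are
# illustrative names, not part of the class above.
import os

import torch
from filelock import FileLock

def load_or_build(cache_path, build_features):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        obj = build_features()
        torch.save(obj, cache_path)
        return obj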
| 5 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowercase_ ( _A : Tuple , _A : int , _A : List[str] , _A : Any=5 ):
"""simple docstring"""
assert masked_input.count("<mask>" ) == 1
lowerCamelCase__ : List[str] = torch.tensor(tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) ).unsqueeze(0 ) # Batch size 1
lowerCamelCase__ : str = model(snake_case__ )[0] # The last hidden-state is the first element of the output tuple
lowerCamelCase__ : str = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
lowerCamelCase__ : Dict = logits[0, masked_index, :]
lowerCamelCase__ : int = logits.softmax(dim=0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = prob.topk(k=snake_case__ , dim=0 )
lowerCamelCase__ : Any = " ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(snake_case__ ) )] )
lowerCamelCase__ : Optional[Any] = tokenizer.mask_token
lowerCamelCase__ : Optional[Any] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
lowerCamelCase__ : int = predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(snake_case__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(snake_case__ ) , snake_case__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(snake_case__ , snake_case__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
A : Any = CamembertTokenizer.from_pretrained("camembert-base")
A : int = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
A : str = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
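# For comparison, the same top-k masked-token decoding can be done with the
# high-level transformers pipeline API (a sketch; downloads weights on first
# run):
from transformers import pipeline

fill = pipeline("fill-mask", model="camembert-base")
print(fill("Le camembert est <mask> :)", top_k=3))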
| 721 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
A : Tuple = logging.get_logger(__name__)
A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
A : int = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
A : Union[str, Any] = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : str = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
lowerCamelCase__ : Any = bs[:]
lowerCamelCase__ : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_A )
cs.append(2**8 + n )
n += 1
lowerCamelCase__ : Any = [chr(_A ) for n in cs]
return dict(zip(_A , _A ) )
def lowercase_ ( _A : Any ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = set()
lowerCamelCase__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase__ : Any = char
return pairs
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ["input_ids", "attention_mask"]
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase )
lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding
lowerCamelCase__ : List[Any] = bytes_to_unicode()
lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1]
lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase )
lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram
lowerCamelCase__ : int = []
lowerCamelCase__ : int = 0
while i < len(__lowerCamelCase ):
try:
lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : List[str] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : Dict = tuple(__lowerCamelCase )
lowerCamelCase__ : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase )
lowerCamelCase__ : Dict = word
return word
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = []
for token in re.findall(self.pat , __lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase ( self : Any , __lowerCamelCase : int ):
'''simple docstring'''
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.decoder.get(__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase )
lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase__ : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ : Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
lowerCamelCase__ : Tuple = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
lowerCamelCase__ : List[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
lowerCamelCase__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowerCamelCase__ : Any = [self.sep_token_id]
lowerCamelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
lowerCamelCase__ : Dict = " " + text
return (text, kwargs)
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ : str = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ : Optional[int] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
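# A quick illustration of the `get_pairs` helper above: it collects the set
# of adjacent symbol pairs that the BPE loop ranks and merges. The names
# below are illustrative only.
def _demo_get_pairs():
    word = ("l", "o", "w", "e", "r")
    pairs, prev = set(), word[0]
    for ch in word[1:]:
        pairs.add((prev, ch))
        prev = ch
    assert pairs == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}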
| 5 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A : str = logging.get_logger(__name__)
# TODO: upload to AWS
A : Optional[int] = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = 'retribert'
def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : int=768 , __lowerCamelCase : str=8 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=128 , __lowerCamelCase : int=0 , **__lowerCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : str = num_attention_heads
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : List[str] = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = max_position_embeddings
lowerCamelCase__ : Optional[int] = type_vocab_size
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : List[Any] = layer_norm_eps
lowerCamelCase__ : List[str] = share_encoders
lowerCamelCase__ : str = projection_dim
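# Hypothetical usage: like any PretrainedConfig subclass, the config above is
# a plain container and can be instantiated with keyword overrides (the class
# name follows this corpus's mangled `_lowercase` naming, and the attribute
# names below are assumptions about the pre-mangling originals):
# config = _lowercase(projection_dim=256)
# assert config.projection_dim == 256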
| 700 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = KandinskyVaaImgaImgPipeline
A__ = ["image_embeds", "negative_image_embeds", "image"]
A__ = [
"image_embeds",
"negative_image_embeds",
"image",
]
A__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
A__ = False
@property
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = {
"in_channels": 4,
# Out channels is double the in channels because the model predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.dummy_unet
lowerCamelCase__ : Optional[Any] = self.dummy_movq
lowerCamelCase__ : Optional[int] = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int=0 ):
'''simple docstring'''
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create init_image
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) )
if str(__lowerCamelCase ).startswith("mps" ):
lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase )
else:
lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Dict = "cpu"
lowerCamelCase__ : str = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase )
lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
lowerCamelCase__ : List[str] = output.images
lowerCamelCase__ : Optional[Any] = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : str = np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
lowerCamelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCamelCase__ : Any = "A red cartoon frog, 4k"
lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowerCamelCase__ : str = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCamelCase__ : Optional[Any] = pipeline(
image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
lowerCamelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
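# The `strength` argument above controls how far along the noise schedule the
# init image is pushed before denoising. A sketch of the usual img2img
# timestep truncation (illustrative, not the pipeline's exact internals):
def num_img2img_steps(num_inference_steps: int, strength: float) -> int:
    return min(int(num_inference_steps * strength), num_inference_steps)

# e.g. strength=0.2 with 100 steps runs only the last 20 denoising steps.
assert num_img2img_steps(100, 0.2) == 20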
| 5 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int=7 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Any=18 , __lowerCamelCase : Union[str, Any]=30 , __lowerCamelCase : Dict=400 , __lowerCamelCase : int=True , __lowerCamelCase : Dict=None , __lowerCamelCase : str=True , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=True , ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 20}
lowerCamelCase__ : List[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCamelCase__ : Any = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : List[str] = image_size
lowerCamelCase__ : Dict = min_resolution
lowerCamelCase__ : str = max_resolution
lowerCamelCase__ : Dict = do_resize
lowerCamelCase__ : Optional[Any] = size
lowerCamelCase__ : int = do_center_crop
lowerCamelCase__ : Dict = crop_size
lowerCamelCase__ : Optional[int] = do_flip_channel_order
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(__lowerCamelCase , "center_crop" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_flip_channel_order" ) )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCamelCase__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : str = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : Dict = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : Any = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
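# `do_flip_channel_order` above swaps RGB and BGR channel order; a short
# numpy sketch of that operation on a channels-first array (illustrative,
# not the processor's exact code path):
import numpy as np

rgb = np.arange(3 * 2 * 2, dtype=np.float32).reshape(3, 2, 2)
bgr = rgb[::-1, :, :]  # reverse the channel axis: RGB -> BGR
assert (bgr[0] == rgb[2]).all()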
| 701 |
def lowercase_ ( _A : int , _A : int ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
lowerCamelCase__ : List[str] = str(bin(_A ) )[2:] # remove the leading "0b"
lowerCamelCase__ : List[Any] = str(bin(_A ) )[2:] # remove the leading "0b"
lowerCamelCase__ : List[Any] = max(len(_A ) , len(_A ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_A ) , b_binary.zfill(_A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
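# Sanity check for the binary XOR helper above (the function name is mangled
# to `lowercase_` in this corpus): 25 ^ 32 == 57, i.e. "0b111001".
assert 25 ^ 32 == 57 and bin(57) == "0b111001"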
| 5 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 702 |
import os
from pathlib import Path
def lowercase_ ( ):
"""simple docstring"""
from torch.utils.cpp_extension import load
lowerCamelCase__ : Any = Path(_A ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
lowerCamelCase__ : Optional[int] = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , _A , with_cuda=_A , extra_include_paths=[str(_A )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
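# Hypothetical usage: the loader above JIT-compiles the C++/CUDA kernels on
# first call, so callers typically invoke it once and reuse the returned
# module, e.g. MSDA = load_cuda_kernels() (name illustrative; it is mangled
# above). Compilation requires a CUDA toolchain and the kernel sources.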
| 5 | 0 |
from __future__ import annotations
from typing import Any
class _lowercase :
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float = 0 ):
'''simple docstring'''
lowerCamelCase__ : List[str] = row, column
lowerCamelCase__ : List[Any] = [[default_value for c in range(__lowerCamelCase )] for r in range(__lowerCamelCase )]
def __str__( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = f"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
lowerCamelCase__ : Tuple = 0
for row_vector in self.array:
for obj in row_vector:
lowerCamelCase__ : int = max(__lowerCamelCase , len(str(__lowerCamelCase ) ) )
lowerCamelCase__ : Dict = f"%{max_element_length}s"
# Make string and return
def single_line(__lowerCamelCase : list[float] ) -> str:
nonlocal string_format_identifier
lowerCamelCase__ : Dict = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self : int ):
'''simple docstring'''
return str(self )
def lowerCAmelCase ( self : str , __lowerCamelCase : tuple[int, int] ):
'''simple docstring'''
if not (isinstance(__lowerCamelCase , (list, tuple) ) and len(__lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Tuple , __lowerCamelCase : tuple[int, int] ):
'''simple docstring'''
assert self.validate_indicies(__lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Optional[int] , __lowerCamelCase : tuple[int, int] , __lowerCamelCase : float ):
'''simple docstring'''
assert self.validate_indicies(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = value
def __add__( self : List[str] , __lowerCamelCase : Matrix ):
'''simple docstring'''
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
lowerCamelCase__ : str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ : Optional[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ : Any = -self[r, c]
return result
def __sub__( self : Dict , __lowerCamelCase : Matrix ):
'''simple docstring'''
return self + (-another)
def __mul__( self : Dict , __lowerCamelCase : int | float | Matrix ):
'''simple docstring'''
if isinstance(__lowerCamelCase , (int, float) ): # Scalar multiplication
lowerCamelCase__ : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ : Tuple = self[r, c] * another
return result
elif isinstance(__lowerCamelCase , __lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
lowerCamelCase__ : Optional[Any] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCamelCase__ : Optional[int] = f"Unsupported type given for another ({type(__lowerCamelCase )})"
raise TypeError(__lowerCamelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ : Any = self[r, c]
return result
def lowerCAmelCase ( self : int , __lowerCamelCase : Matrix , __lowerCamelCase : Matrix ):
'''simple docstring'''
assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCamelCase__ : Optional[int] = v.transpose()
lowerCamelCase__ : Dict = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
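# For reference, the method above applies the Sherman-Morrison identity,
# with `self` playing the role of A^(-1):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# The `numerator_factor == 0` guard rejects the non-invertible case
# 1 + v^T A^(-1) u == 0.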
# Testing
if __name__ == "__main__":
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Dict = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCamelCase__ : int = 1
print(F"a^(-1) is {ainv}" )
# u, v
lowerCamelCase__ : Optional[Any] = Matrix(3 , 1 , 0 )
lowerCamelCase__ : Optional[int] = 1, 2, -3
lowerCamelCase__ : List[Any] = Matrix(3 , 1 , 0 )
lowerCamelCase__ : Optional[int] = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}" )
def lowercase_ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 703 |
import os
from datetime import datetime as dt
from github import Github
A : Union[str, Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = Github(os.environ["GITHUB_TOKEN"] )
lowerCamelCase__ : str = g.get_repo("huggingface/diffusers" )
lowerCamelCase__ : Optional[int] = repo.get_issues(state="open" )
for issue in open_issues:
lowerCamelCase__ : str = sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A )
lowerCamelCase__ : str = comments[0] if len(_A ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
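# Hypothetical invocation, assuming a token with repo scope is exported:
#   GITHUB_TOKEN=ghp_xxx python stale_issues.py   # script name illustrative
# Per the thresholds above, the bot posts a stale notice after 23 days of
# inactivity and closes the issue 7 days after its own notice goes unanswered.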
| 5 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _lowercase :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]=13 , __lowerCamelCase : Any=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : int=99 , __lowerCamelCase : Any=[1, 1, 2] , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : List[str]=37 , __lowerCamelCase : Optional[int]="gelu_new" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=False , ):
'''simple docstring'''
lowerCamelCase__ : Any = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : List[Any] = use_input_mask
lowerCamelCase__ : Tuple = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : int = block_sizes
lowerCamelCase__ : Dict = num_decoder_layers
lowerCamelCase__ : Dict = d_model
lowerCamelCase__ : Tuple = n_head
lowerCamelCase__ : Tuple = d_head
lowerCamelCase__ : str = d_inner
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : List[Any] = hidden_dropout
lowerCamelCase__ : Union[str, Any] = attention_dropout
lowerCamelCase__ : int = activation_dropout
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Optional[Any] = 2
lowerCamelCase__ : Dict = num_labels
lowerCamelCase__ : str = num_choices
lowerCamelCase__ : int = scope
lowerCamelCase__ : List[Any] = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCamelCase__ : Optional[int] = n_head
# Used in the tests to check the size of the first hidden state
lowerCamelCase__ : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCamelCase__ : List[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCamelCase__ : int = self.num_hidden_layers + 2
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : int = None
if self.use_input_mask:
lowerCamelCase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : List[Any] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TFFunnelModel(config=_a )
lowerCamelCase__ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase__ : int = model(_a )
lowerCamelCase__ : Tuple = [input_ids, input_mask]
lowerCamelCase__ : Dict = model(_a )
lowerCamelCase__ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Tuple = TFFunnelModel(config=_a )
lowerCamelCase__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCamelCase__ : int = False
lowerCamelCase__ : List[Any] = TFFunnelModel(config=_a )
lowerCamelCase__ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCAmelCase ( self : int , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
lowerCamelCase__ : Dict = TFFunnelBaseModel(config=_a )
lowerCamelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase__ : List[Any] = model(_a )
lowerCamelCase__ : int = [input_ids, input_mask]
lowerCamelCase__ : Any = model(_a )
lowerCamelCase__ : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Union[str, Any] = TFFunnelBaseModel(config=_a )
lowerCamelCase__ : Optional[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Any = TFFunnelBaseModel(config=_a )
lowerCamelCase__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , ):
'''simple docstring'''
lowerCamelCase__ : int = TFFunnelForPreTraining(config=_a )
lowerCamelCase__ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase__ : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFFunnelForMaskedLM(config=_a )
lowerCamelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase__ : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_labels
lowerCamelCase__ : Any = TFFunnelForSequenceClassification(config=_a )
lowerCamelCase__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase__ : List[str] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_choices
lowerCamelCase__ : Union[str, Any] = TFFunnelForMultipleChoice(config=_a )
lowerCamelCase__ : Union[str, Any] = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ : int = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ : List[Any] = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ : Tuple = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowerCamelCase__ : Tuple = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Union[str, Any] = TFFunnelForTokenClassification(config=_a )
lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase__ : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFFunnelForQuestionAnswering(config=_a )
lowerCamelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase__ : List[Any] = model(_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
"""simple docstring"""
A__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A__ = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ = False
A__ = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
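
# Minimal illustration (not part of the test suite) of the expand_dims + tile
# pattern used above to build multiple-choice inputs from (batch, seq_len) ids.
# Shown as a comment so importing this test module stays side-effect free:
#
#   input_ids = tf.zeros((2, 7), dtype=tf.int32)               # (batch, seq_len)
#   mc_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, 4, 1))  # (batch, 4, seq_len)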
| 704 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Decrypt ``ciphertext`` by trying every Caesar shift and picking the one
    whose letter distribution best matches English (lowest chi-squared)."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
"a": 0.08_497,
"b": 0.01_492,
"c": 0.02_202,
"d": 0.04_253,
"e": 0.11_162,
"f": 0.02_228,
"g": 0.02_015,
"h": 0.06_094,
"i": 0.07_546,
"j": 0.00_153,
"k": 0.01_292,
"l": 0.04_025,
"m": 0.02_406,
"n": 0.06_749,
"o": 0.07_507,
"p": 0.01_929,
"q": 0.00_095,
"r": 0.07_587,
"s": 0.06_327,
"t": 0.09_356,
"u": 0.02_758,
"v": 0.00_978,
"w": 0.02_560,
"x": 0.00_150,
"y": 0.01_994,
"z": 0.00_077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
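
if __name__ == "__main__":
    # Round-trip sanity check (illustrative): Caesar-encrypt a plaintext with a
    # known shift, then confirm the chi-squared search recovers shift and text.
    plaintext = "defend the east wall of the castle"
    encrypted = "".join(
        chr((ord(ch) - 97 + 7) % 26 + 97) if ch.isalpha() else ch for ch in plaintext
    )
    shift, chi_value, decoded = decrypt_caesar_with_chi_squared(encrypted)
    print(shift, round(chi_value, 3), decoded)  # expected shift: 7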
| 5 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
A : Dict = "CompVis/stable-diffusion-v1-1"
A : Optional[Any] = "CompVis/stable-diffusion-v1-2"
A : int = "CompVis/stable-diffusion-v1-3"
A : Tuple = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Run the same prompt through four Stable Diffusion v1.x checkpoints and collect one image from each."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe_1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe_2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe_3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
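
# Usage sketch (illustrative only; instantiating this pipeline downloads four full
# Stable Diffusion checkpoints, so it is shown as a comment; the custom_pipeline
# name is an assumption based on the diffusers community-pipeline mechanism):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       pipe_4_model_id, custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#   images = output.images  # one image per checkpoint, v1.1 through v1.4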
| 705 |
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of ``number`` ends in ``number`` itself.

    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(7)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
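    # Extra spot checks: the first automorphic numbers are 0, 1, 5, 6, 25, 76, 376, 625.
    assert all(is_automorphic_number(n) for n in (0, 1, 5, 6, 25, 76, 376, 625))
    assert not is_automorphic_number(7)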
| 5 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"
    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
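
# Illustrative round trip with the classes above (default values, no downloads):
#
#   config = Blip2Config()               # default vision / q-former / OPT text configs
#   as_dict = config.to_dict()           # nested dict with one entry per sub-config
#   restored = Blip2Config.from_dict(as_dict)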
| 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
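
# The init above registers submodule contents lazily: nothing heavy is imported
# until an attribute is first accessed. A minimal dependency-free sketch of the
# same idea (names hypothetical, not the transformers implementation):
#
#   import importlib, types
#
#   class MiniLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   mod = importlib.import_module(f"{self.__name__}.{submodule}")
#                   return getattr(mod, attr)
#           raise AttributeError(attr)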
| 5 | 0 |
from ..utils import DummyObject, requires_backends
# NOTE: the original class names were anonymised in the source; the placeholder
# names below are hypothetical, but the DummyObject/_backends pattern is the
# standard diffusers dummy-object structure.
class _FlaxTransformersDummy1(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _FlaxTransformersDummy2(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _FlaxTransformersDummy3(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _FlaxTransformersDummy4(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 707 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
    def __available_resources(self) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
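    # Demo run with the module-level test tables (prints the safe execution order):
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)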
| 5 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
A : int = get_tests_dir("fixtures/test_sentencepiece.model")
A : int = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
A : Optional[int] = '''>>zh<<'''
A : Any = '''Helsinki-NLP/'''
if is_torch_available():
A : Optional[Any] = '''pt'''
elif is_tf_available():
A : Any = '''tf'''
else:
A : int = '''jax'''
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase__ : List[Any] = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__, model_name="Helsinki-NLP/opus-mt-en-de", revision="1a8c2263da11e68e50938f97e10cd57820bd504c", decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
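
# Stand-alone usage sketch outside the test harness (downloads a checkpoint, so
# shown as a comment; model name taken from the tests above):
#
#   tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   batch = tok(["I am a small frog"], return_tensors="pt")
#   tok.batch_decode(batch.input_ids)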
| 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences,
        )
| 5 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__( self : int , __lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[Any] ):
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCamelCase__ : Any = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(__lowerCAmelCase , num_labels=__lowerCAmelCase , mode=self.mode , **__lowerCAmelCase )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
lowerCamelCase__ : Tuple = Path(self.output_dir ) / "metrics.json"
lowerCamelCase__ : List[str] = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
lowerCamelCase__ : str = 0
lowerCamelCase__ : List[Any] = defaultdict(__lowerCAmelCase )
lowerCamelCase__ : Optional[Any] = self.config.model_type
lowerCamelCase__ : Optional[Any] = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
lowerCamelCase__ : Dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCamelCase__ : str = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
lowerCamelCase__ : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCamelCase__ : List[Any] = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCamelCase__ : Optional[Any] = get_git_info()["repo_sha"]
lowerCamelCase__ : List[str] = hparams.num_workers
lowerCamelCase__ : List[str] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __lowerCAmelCase ):
lowerCamelCase__ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCamelCase__ : Optional[Any] = self.decoder_start_token_id
lowerCamelCase__ : List[Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
lowerCamelCase__ : str = False
lowerCamelCase__ : Optional[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCamelCase__ : Dict = self.hparams.eval_max_gen_length
else:
lowerCamelCase__ : Any = self.model.config.max_length
lowerCamelCase__ : Tuple = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict[str, torch.Tensor] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(__lowerCAmelCase , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
lowerCamelCase__ : Optional[Any] = True
return readable_batch
def lowerCAmelCase ( self : str , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.model(__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : List[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.tokenizer.batch_decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return lmap(str.strip , __lowerCAmelCase )
def lowerCAmelCase ( self : int , __lowerCamelCase : dict ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.tokenizer.pad_token_id
lowerCamelCase__ , lowerCamelCase__ : List[Any] = batch["input_ids"], batch["attention_mask"]
lowerCamelCase__ : Any = batch["labels"]
if isinstance(self.model , __lowerCAmelCase ):
lowerCamelCase__ : List[Any] = self.model._shift_right(__lowerCAmelCase )
else:
lowerCamelCase__ : Optional[Any] = shift_tokens_right(__lowerCAmelCase , __lowerCAmelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCamelCase__ : Dict = decoder_input_ids
self.save_readable_batch(__lowerCAmelCase )
lowerCamelCase__ : Optional[Any] = self(__lowerCAmelCase , attention_mask=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , use_cache=__lowerCAmelCase )
lowerCamelCase__ : Optional[int] = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCamelCase__ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=__lowerCAmelCase )
assert lm_logits.shape[-1] == self.vocab_size
lowerCamelCase__ : int = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCamelCase__ : List[Any] = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
lowerCamelCase__ , lowerCamelCase__ : List[Any] = label_smoothed_nll_loss(
__lowerCAmelCase , __lowerCAmelCase , self.hparams.label_smoothing , ignore_index=__lowerCAmelCase )
return (loss,)
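
    # Note on the label-smoothing branch above: label_smoothed_nll_loss blends the
    # NLL of the gold token with a uniform penalty over the vocabulary, roughly
    # (illustrative, following the fairseq formulation):
    #     eps_i = epsilon / vocab_size
    #     loss  = (1 - epsilon) * nll_loss + eps_i * smooth_loss
    # where smooth_loss sums -log p over all vocabulary entries.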
@property
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return self.tokenizer.pad_token_id
def lowerCAmelCase ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self._step(__lowerCAmelCase )
lowerCamelCase__ : int = dict(zip(self.loss_names , __lowerCAmelCase ) )
# tokens per batch
lowerCamelCase__ : Tuple = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
lowerCamelCase__ : int = batch["input_ids"].shape[0]
lowerCamelCase__ : int = batch["input_ids"].eq(self.pad ).sum()
lowerCamelCase__ : Optional[Any] = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] ):
'''simple docstring'''
return self._generative_step(__lowerCAmelCase )
def lowerCAmelCase ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : str="val" ):
'''simple docstring'''
self.step_count += 1
lowerCamelCase__ : str = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCamelCase__ : List[str] = losses["loss"]
lowerCamelCase__ : str = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
lowerCamelCase__ : int = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCamelCase__ : List[Any] = torch.tensor(__lowerCAmelCase ).type_as(__lowerCAmelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__lowerCAmelCase )
lowerCamelCase__ : str = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
lowerCamelCase__ : int = self.step_count
self.metrics[prefix].append(__lowerCAmelCase ) # callback writes this to self.metrics_save_path
lowerCamelCase__ : Optional[Any] = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[str] ):
'''simple docstring'''
return calculate_rouge(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCamelCase__ : Tuple = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=__lowerCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCamelCase__ : List[str] = (time.time() - ta) / batch["input_ids"].shape[0]
lowerCamelCase__ : str = self.ids_to_clean_text(__lowerCAmelCase )
lowerCamelCase__ : int = self.ids_to_clean_text(batch["labels"] )
lowerCamelCase__ : int = self._step(__lowerCAmelCase )
lowerCamelCase__ : str = dict(zip(self.loss_names , __lowerCAmelCase ) )
lowerCamelCase__ : str = self.calc_generative_metrics(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ : Optional[Any] = np.mean(lmap(__lowerCAmelCase , __lowerCAmelCase ) )
base_metrics.update(gen_time=__lowerCAmelCase , gen_len=__lowerCAmelCase , preds=__lowerCAmelCase , target=__lowerCAmelCase , **__lowerCAmelCase )
return base_metrics
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : str ):
'''simple docstring'''
return self._generative_step(__lowerCAmelCase )
def lowerCAmelCase ( self : str , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.validation_epoch_end(__lowerCAmelCase , prefix="test" )
def lowerCAmelCase ( self : int , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.n_obs[type_path]
lowerCamelCase__ : Tuple = self.target_lens[type_path]
lowerCamelCase__ : Any = self.dataset_class(
self.tokenizer , type_path=__lowerCAmelCase , n_obs=__lowerCAmelCase , max_target_length=__lowerCAmelCase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : bool = False ):
'''simple docstring'''
lowerCamelCase__ : Any = self.get_dataset(__lowerCAmelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCamelCase__ : str = dataset.make_sortish_sampler(__lowerCAmelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCAmelCase , num_workers=self.num_workers , sampler=__lowerCAmelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCamelCase__ : Tuple = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__lowerCAmelCase , batch_sampler=__lowerCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCAmelCase , num_workers=self.num_workers , sampler=__lowerCAmelCase , )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=__lowerCAmelCase )
return dataloader
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def add_model_specific_args(parser , root_dir ):
        """Register seq2seq-specific CLI arguments on top of the generic/base ones."""
        BaseTransformer.add_model_specific_args(parser , root_dir )
        add_generic_args(parser , root_dir )
        parser.add_argument(
            "--max_source_length" , default=1024 , type=int , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--max_target_length" , default=56 , type=int , help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--val_max_target_length" , default=142 , type=int , help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--test_max_target_length" , default=142 , type=int , help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument("--freeze_encoder" , action="store_true" )
        parser.add_argument("--freeze_embeds" , action="store_true" )
        parser.add_argument("--sortish_sampler" , action="store_true" , default=False )
        parser.add_argument("--overwrite_output_dir" , action="store_true" , default=False )
        parser.add_argument("--max_tokens_per_batch" , type=int , default=None )
        parser.add_argument("--logger_name" , type=str , choices=["default", "wandb", "wandb_shared"] , default="default" )
        parser.add_argument("--n_train" , type=int , default=-1 , required=False , help="# examples. -1 means use all." )
        parser.add_argument("--n_val" , type=int , default=500 , required=False , help="# examples. -1 means use all." )
        parser.add_argument("--n_test" , type=int , default=-1 , required=False , help="# examples. -1 means use all." )
        parser.add_argument(
            "--task" , type=str , default="summarization" , required=False , help="Task to fine-tune for: summarization or translation." )
        parser.add_argument("--label_smoothing" , type=float , default=0.0 , required=False )
        parser.add_argument("--src_lang" , type=str , default="" , required=False )
        parser.add_argument("--tgt_lang" , type=str , default="" , required=False )
        parser.add_argument("--eval_beams" , type=int , default=None , required=False )
        parser.add_argument(
            "--val_metric" , type=str , default=None , required=False , choices=["bleu", "rouge2", "loss", None] )
        parser.add_argument("--eval_max_gen_length" , type=int , default=None , help="never generate more than n tokens" )
        parser.add_argument("--save_top_k" , type=int , default=1 , required=False , help="How many checkpoints to save" )
        parser.add_argument(
            "--early_stopping_patience" , type=int , default=-1 , required=False , help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ) , )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self , hparams , **kwargs ):
        super().__init__(hparams , **kwargs )
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self , preds , target ):
        return calculate_bleu(preds , target )


def main(args , model=None ):
    """Train (and optionally test) a summarization or translation module."""
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args )
        else:
            model = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith("/tmp" )
        or str(args.output_dir ).startswith("/var" )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT" , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name , project=F"hf_{dataset}" )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
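# Sketch of a typical invocation of this script (paths and flag values below are
# illustrative, not taken from this repository; the model/data flags come from the
# generic argument helpers registered above):
#
#   python finetune.py \
#       --data_dir ./cnn_dm --output_dir ./out --do_train --do_predict \
#       --task summarization --n_val 500 --sortish_sampler --early_stopping_patience 4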
| 709 |
import cv2 as cva  # OpenCV, aliased to match the `cva.` calls below
import numpy as np
class HarrisCorner:
"""simple docstring"""
    def __init__(self , k: float , window_size: int ):
        """k is the Harris free parameter; only the usual 0.04 / 0.06 values are accepted."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value" )
def __str__( self : str ):
'''simple docstring'''
return str(self.k )
    def detect(self , img_path: str ):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        # entries of the structure tensor, built from the first derivatives
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # the free parameter validated in __init__
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                # Harris response: R = det(M) - k * trace(M)^2
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
A : Tuple = HarrisCorner(0.0_4, 3)
A, A : Optional[int] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 5 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )

    def __call__(self , images , text=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ):
        """Tokenize the text and run the image processor, merging both into a single encoding."""
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
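# Typical usage (sketch; the checkpoint name is an assumption, any BridgeTower checkpoint
# that ships a processor config would do):
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
#   # -> tokenizer fields (input_ids, attention_mask) plus image fields (pixel_values, pixel_mask)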
| 710 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
"""simple docstring"""
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self ):
        """Build random ids, masks and labels plus a config for the checks below."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
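    # Every create_and_check_* helper below unpacks the 7-tuple produced above; the three
    # label tensors are only consumed by the task heads that actually need them.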
    def get_config(self ):
        '''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_for_pretraining(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )

    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict

    def setUp(self ):
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self ):
        model = AlbertModel.from_pretrained("albert-base-v2" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 5 | 0 |
import operator
def strand_sort(arr: list , reverse: bool = False , solution: list = None ) -> list:
    """Repeatedly peel a sorted 'strand' off the input and merge it into the solution."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
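    # Quick demo of how strands are peeled: the first increasing run pulled out of
    # [10, 7, 5, 15, 2] is [10, 15]; the remainder [7, 5, 2] is handled recursively.
    print(strand_sort([10, 7, 5, 15, 2]))  # -> [2, 5, 7, 10, 15]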
| 711 |
import os
def lowercase_ ( _A : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file:
lowerCamelCase__ : List[Any] = [
[int(_A ) for element in line.split("," )]
for line in input_file.readlines()
]
lowerCamelCase__ : Optional[Any] = len(_A )
lowerCamelCase__ : Union[str, Any] = len(matrix[0] )
lowerCamelCase__ : Union[str, Any] = [[-1 for _ in range(_A )] for _ in range(_A )]
for i in range(_A ):
lowerCamelCase__ : Optional[Any] = matrix[i][0]
for j in range(1 , _A ):
for i in range(_A ):
lowerCamelCase__ : int = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _A ):
lowerCamelCase__ : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowerCamelCase__ : str = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'{solution() = }')
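# Worked 2x2 example of the three sweeps (values made up): for matrix [[1, 9], [5, 1]],
# column 0 initialises to [1, 5]; entering column 1 from the left gives [10, 6]; the
# downward sweep keeps [10, 6] and the upward sweep cannot improve row 0 (6 + 9 > 10),
# so the answer is min(10, 6) = 6, i.e. the path 5 -> 1 along the bottom row.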
| 5 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request("GET" , "https://huggingface.co" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("GET" , "https://huggingface.co" , timeout=1.0 )


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("GET" , "https://huggingface.co" )


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("https://huggingface.co" )
| 712 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _lowercase ( datasets.Metric):
"""simple docstring"""
    def _info(self ):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self , predictions , references , max_order=4 , smooth=False ):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
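# Note: `_LazyModule` replaces this module in `sys.modules`, so the torch-heavy imports are
# deferred; a name listed in `_import_structure` only triggers a real import of its submodule
# on first attribute access (e.g. touching `InstructBlipProcessor` never loads the modeling code).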
| 713 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A : Optional[int] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
A : int = BeautifulSoup(res.text, "html.parser")
A : Any = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 5 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A : Union[str, Any] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A : str = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
A : Any = BeautifulSoup(res.text, "html.parser")
A : Optional[Any] = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 714 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
    def test_output_embeds_base_model(self ):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 5 | 0 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob ,
    find_max: bool = True ,
    max_x: float = math.inf ,
    min_x: float = -math.inf ,
    max_y: float = math.inf ,
    min_y: float = -math.inf ,
    visualization: bool = False ,
    start_temperate: float = 100 ,
    rate_of_decrease: float = 0.01 ,
    threshold_temp: float = 1 ,
) -> Any:
    """Hill-climb, but accept occasional downhill moves with probability e^(change / T)."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )  # track the score per iteration for the optional plot
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
        if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations ) , scores )
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state
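# Acceptance rule used above: a worse neighbour (change <= 0 after sign handling) is still
# accepted with probability exp(change / T). With change = -1.0 this is ~0.905 at T = 10
# but only ~0.135 at T = 0.5, so the walk roams early and settles as the temperature drops.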
if __name__ == "__main__":
    def test_fa(x , y ):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_fa(x , y ):  # redefined for the second objective, mirroring the source layout
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 715 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Any = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowercase ( PretrainedConfig):
    """simple docstring"""
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class _lowercase ( OnnxSeq2SeqConfigWithPast):
    """simple docstring"""
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[F"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs

    @property
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[F"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            batch , encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            batch , seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs

    def generate_dummy_inputs(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs

    def _flatten_past_key_values_(self , flattened_output , name , idx , t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
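# Shape convention for each past_key_values entry generated above:
#   (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads)
# with decoder entries padded to decoder_seq_length + 3 in the seq2seq case.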
| 5 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
A : Any = logging.getLogger(__name__)
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
A__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(default=UpperCamelCase__ , metadata={"help": "The input training data file (a text file)."})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.train_file is not None:
lowerCamelCase__ : List[str] = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase__ : Tuple = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _lowercase :
"""simple docstring"""
A__ = 42
A__ = True
A__ = None
A__ = None
def __call__( self : Union[str, Any] , __lowerCamelCase : Any ):
'''simple docstring'''
lowerCamelCase__ : List[str] = "label" if "label" in features[0].keys() else "labels"
lowerCamelCase__ : Union[str, Any] = [feature.pop(__lowerCamelCase ) for feature in features]
lowerCamelCase__ : str = len(__lowerCamelCase )
lowerCamelCase__ : Dict = len(features[0]["input_ids"] )
lowerCamelCase__ : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase )] for feature in features
]
lowerCamelCase__ : Any = list(chain(*__lowerCamelCase ) )
lowerCamelCase__ : Optional[Any] = self.tokenizer.pad(
__lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
lowerCamelCase__ : List[str] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase__ : str = torch.tensor(__lowerCamelCase , dtype=torch.intaa )
return batch
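# Minimal sketch (added for illustration; not part of the original script) of
# why the collator above flattens and then un-flattens: padding must happen
# across every choice of every example at once, after which `view` restores
# the (batch_size, num_choices, seq_len) grouping the model expects.
def _unflatten_shape_sketch():
    import torch

    batch_size, num_choices, seq_len = 2, 4, 8
    flat = torch.zeros(batch_size * num_choices, seq_len)  # jointly padded sequences
    unflat = flat.view(batch_size, num_choices, -1)  # regroup per example
    assert unflat.shape == (batch_size, num_choices, seq_len)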
def main():
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split("." )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [F"ending{i}" for i in range(4 )]
    context_name = "sent1"
    question_header_name = "sent2"
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
            max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
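    # Worked example (added): with `--max_seq_length 2048` and a tokenizer whose
    # `model_max_length` is 512, min(2048, 512) caps the effective length at 512.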
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="max_length" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
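    # Sketch (added; hypothetical helper, never called by the script) of the
    # `v[i : i + 4]` regrouping above: a flat list of tokenized sequences is
    # turned back into rows of four candidate endings, one row per example.
    def _regroup_sketch(flat_list):
        # [a0, a1, a2, a3, b0, b1, b2, b3] -> [[a0, a1, a2, a3], [b0, b1, b2, b3]]
        return [flat_list[i : i + 4] for i in range(0, len(flat_list), 4)]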
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
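# Usage sketch (added; the flags below are the standard HF Trainer/model
# arguments this parser is assumed to expose):
#   python run_swag.py --model_name_or_path bert-base-uncased \
#       --do_train --do_eval --output_dir /tmp/swag_out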
| 716 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[int] = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "xmod"
def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = position_embedding_type
lowerCamelCase__ : str = use_cache
lowerCamelCase__ : Union[str, Any] = classifier_dropout
lowerCamelCase__ : Any = pre_norm
lowerCamelCase__ : Tuple = adapter_reduction_factor
lowerCamelCase__ : Tuple = adapter_layer_norm
lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm
lowerCamelCase__ : Dict = ln_before_adapter
lowerCamelCase__ : List[Any] = list(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = default_language
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
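# Added sketch (illustrative only): the mapping returned by the ONNX `inputs`
# property above marks which tensor dimensions are dynamic. For a standard
# (non multiple-choice) task it is equivalent to
#   OrderedDict(
#       [
#           ("input_ids", {0: "batch", 1: "sequence"}),
#           ("attention_mask", {0: "batch", 1: "sequence"}),
#       ]
#   )
# so the exporter knows batch size and sequence length may vary at runtime.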
| 5 | 0 |
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : Union[str, Any] = is_training
lowerCamelCase__ : Any = use_token_type_ids
lowerCamelCase__ : Union[str, Any] = use_labels
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Optional[int] = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : List[Any] = num_choices
lowerCamelCase__ : Optional[Any] = scope
lowerCamelCase__ : List[Any] = self.vocab_size - 1
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Any = None
lowerCamelCase__ : str = None
lowerCamelCase__ : str = None
if self.use_labels:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase )
lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.num_labels
lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_torch
class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
A__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
A__ = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ):
'''simple docstring'''
lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , )
lowerCamelCase__ : Tuple = inputs_dict["labels"]
lowerCamelCase__ : Any = inputs_dict["labels"]
lowerCamelCase__ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
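# Added note (sketch): the integration test above relies on greedy decoding --
# with `do_sample=False`, `generate` is deterministic, which is what makes the
# exact comparison against a fixed list of token ids meaningful.
def _greedy_decode_sketch(model, input_ids):
    # Hypothetical helper, not part of the test suite.
    return model.generate(input_ids, do_sample=False)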
| 5 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class _lowercase ( __a , unittest.TestCase):
"""simple docstring"""
A__ = FlaxAutoencoderKL
@property
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase ( self : int ):
'''simple docstring'''
        init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 718 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Dict = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "ibert"
def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : List[str] = quant_mode
lowerCamelCase__ : int = force_dequant
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
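# Usage sketch (added; the class above corresponds to `IBertConfig` upstream):
#   config = IBertConfig(quant_mode=True, force_dequant="none")
# enables integer-only (I-BERT) mode; `force_dequant` can selectively fall
# back to floating point for individual nonlinear operations.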
| 5 | 0 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
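# Sketch (assumption: mirrors how the Distiller combines the objectives that
# the asserts above validate) -- the alpha_* weights form a single linear
# combination, which is why at least one of them must be strictly positive:
def _total_loss_sketch(l_ce, l_mlm, l_clm, l_mse, l_cos, args):
    return (
        args.alpha_ce * l_ce
        + args.alpha_mlm * l_mlm
        + args.alpha_clm * l_clm
        + args.alpha_mse * l_mse
        + args.alpha_cos * l_cos
    )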
def freeze_pos_embeddings(student , args ):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student , args ):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=UpperCamelCase__ , choices=["distilbert", "roberta", "gpt2"] , required=UpperCamelCase__ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=UpperCamelCase__ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=UpperCamelCase__ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=UpperCamelCase__ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=UpperCamelCase__ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=UpperCamelCase__ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=UpperCamelCase__ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=UpperCamelCase__ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=UpperCamelCase__ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=UpperCamelCase__ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=UpperCamelCase__ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=UpperCamelCase__ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=UpperCamelCase__ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=UpperCamelCase__ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=UpperCamelCase__ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=UpperCamelCase__ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=UpperCamelCase__ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=UpperCamelCase__ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=UpperCamelCase__ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=UpperCamelCase__ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=UpperCamelCase__ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=UpperCamelCase__ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=UpperCamelCase__ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=UpperCamelCase__ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=UpperCamelCase__ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=UpperCamelCase__ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=UpperCamelCase__ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=UpperCamelCase__ , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=UpperCamelCase__ , default=4000 , help="Checkpoint interval." )
lowerCamelCase__ : Union[str, Any] = parser.parse_args()
sanity_checks(UpperCamelCase__ )
# ARGS #
    init_gpu_params(args )
    set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(F"Param: {args}" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
            json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F"Special tokens {special_tok_ids}" )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"Loading data from {args.data_file}" )
with open(args.data_file , "rb" ) as fp:
lowerCamelCase__ : Optional[int] = pickle.load(UpperCamelCase__ )
if args.mlm:
logger.info(F"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , "rb" ) as fp:
lowerCamelCase__ : str = pickle.load(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = np.maximum(UpperCamelCase__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCamelCase__ : Union[str, Any] = 0.0 # do not predict special tokens
lowerCamelCase__ : List[str] = torch.from_numpy(UpperCamelCase__ )
else:
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : int = LmSeqsDataset(params=UpperCamelCase__ , data=UpperCamelCase__ )
logger.info("Data loader created." )
# STUDENT #
logger.info(F"Loading student config from {args.student_config}" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F"Loading pretrained weights from {args.student_pretrained_weights}" )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(F"cuda:{args.local_rank}" )
    logger.info("Student loaded." )
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
    if args.n_gpu > 0:
        teacher.to(F"cuda:{args.local_rank}" )
    logger.info(F"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
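# Usage sketch (added; all paths and file names are placeholders): distilling
# BERT into DistilBERT with the script above:
#   python train.py --student_type distilbert --student_config cfg.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --data_file data.pickle --token_counts counts.pickle --dump_path out/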
| 719 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
A : Union[str, Any] = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "roberta"
def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : List[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : Dict = layer_norm_eps
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : Any = use_cache
lowerCamelCase__ : int = classifier_dropout
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 5 | 0 |
def xnor_gate(input_a: int , input_b: int ) -> int:
    """simple docstring"""
    return 1 if input_a == input_b else 0
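# Equivalent bitwise form (added sketch): XNOR is the complement of XOR, so for
# one-bit inputs `1 - (input_a ^ input_b)` yields the same truth table.
def xnor_gate_bitwise(input_a: int, input_b: int) -> int:
    return 1 - (input_a ^ input_b)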
def lowercase_ ( ):
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 720 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
A : Union[str, Any] = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
"""simple docstring"""
    A__ = field(
        default=None , metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
A__ = field(
default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
A__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
A__ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
A__ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
A__ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"})
A__ = field(
default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
A__ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    """simple docstring"""
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """simple docstring"""
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self : Optional[int] , args : SquadDataTrainingArguments , tokenizer : PreTrainedTokenizer , limit_length : Optional[int] = None , mode : Union[str, Split] = Split.train , is_language_sensitive : Optional[bool] = False , cache_dir : Optional[str] = None , dataset_format : Optional[str] = "pt" , ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset" , None )
                self.examples = self.old_features.get("examples" , None )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.features )
    def __getitem__( self : List[str] , i : int ):
        '''simple docstring'''
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
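# Usage sketch (added; mirrors the upstream `SquadDataset` API -- `data_dir`
# is a placeholder and a tokenizer instance is assumed):
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="squad/")
#   dataset = SquadDataset(args, tokenizer, mode=Split.dev)
#   batch = dataset[0]  # dict of tensors ready for a QA model forward pass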
| 5 | 0 |
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A : List[str] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    """simple docstring"""
    compute_environment = _ask_options(
        "In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("config" , description=A )
    else:
        parser = argparse.ArgumentParser("Accelerate config command" , description=A )
    parser.add_argument(
        "--config_file" , default=None , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args ):
    """simple docstring"""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith(".json" ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"accelerate configuration saved at {config_file}" )
def main():
    """simple docstring"""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
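# Usage note (added): running `accelerate config` walks through the prompts
# above and writes the answers to `default_config.yaml` in the cache location;
# passing `--config_file some_path.json` saves JSON instead, since the `.json`
# suffix selects `to_json_file` over `to_yaml_file` in `config_command`.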
| 721 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
A : Tuple = logging.get_logger(__name__)
A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
A : int = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
A : Union[str, Any] = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
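# Round-trip sketch (added): the mapping above is a bijection from all 256 byte
# values to printable unicode characters, so arbitrary UTF-8 text survives BPE.
def _byte_roundtrip_sketch(text: str) -> str:
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    encoded = "".join(byte_encoder[b] for b in text.encode("utf-8"))
    return bytearray(byte_decoder[c] for c in encoded).decode("utf-8")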
def get_pairs(word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
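# Example (added): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the candidate merges that
# the BPE merge loop below ranks against `self.bpe_ranks`.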
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ["input_ids", "attention_mask"]
    def __init__( self : str , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase )
lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram
lowerCamelCase__ : int = []
lowerCamelCase__ : int = 0
while i < len(__lowerCamelCase ):
try:
lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : List[str] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : Dict = tuple(__lowerCamelCase )
lowerCamelCase__ : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase )
lowerCamelCase__ : Dict = word
return word
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = []
for token in re.findall(self.pat , __lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase ( self : Any , __lowerCamelCase : int ):
'''simple docstring'''
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.decoder.get(__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase )
lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase__ : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ : Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
lowerCamelCase__ : Tuple = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
lowerCamelCase__ : List[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
lowerCamelCase__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowerCamelCase__ : Any = [self.sep_token_id]
lowerCamelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
lowerCamelCase__ : Dict = " " + text
return (text, kwargs)
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ : str = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ : Optional[int] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
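# A short illustration (added) of the `_pad` override above: with
# padding_side="right" and two padding positions to fill, a global attention
# mask of [1, 0, 0] becomes [1, 0, 0, -1, -1]. The -1 entries mark padding,
# since 0 already means "local attention" for LED.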
| 5 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A : int = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
A : Optional[Any] = json.load(f)
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : int , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(__lowerCamelCase )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Tuple = FSMTForConditionalGeneration.from_pretrained(__lowerCamelCase ).to(__lowerCamelCase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 2_6.0],
["ru-en", 2_2.0],
["en-de", 2_2.0],
["de-en", 2_9.0],
] )
@slow
def lowerCAmelCase ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
'''simple docstring'''
lowerCamelCase__ : str = f"facebook/wmt19-{pair}"
lowerCamelCase__ : List[str] = self.get_tokenizer(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = self.get_model(__lowerCamelCase )
lowerCamelCase__ : Tuple = bleu_data[pair]["src"]
lowerCamelCase__ : Tuple = bleu_data[pair]["tgt"]
lowerCamelCase__ : List[str] = tokenizer(__lowerCamelCase , return_tensors="pt" , truncation=__lowerCamelCase , padding="longest" ).to(__lowerCamelCase )
lowerCamelCase__ : Dict = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(
__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
lowerCamelCase__ : List[str] = calculate_bleu(__lowerCamelCase , __lowerCamelCase )
print(__lowerCamelCase )
self.assertGreaterEqual(scores["bleu"] , __lowerCamelCase )
| 700 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = KandinskyVaaImgaImgPipeline
A__ = ["image_embeds", "negative_image_embeds", "image"]
A__ = [
"image_embeds",
"negative_image_embeds",
"image",
]
A__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
A__ = False
@property
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.dummy_unet
lowerCamelCase__ : Optional[Any] = self.dummy_movq
lowerCamelCase__ : Optional[int] = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int=0 ):
'''simple docstring'''
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create init_image
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) )
if str(__lowerCamelCase ).startswith("mps" ):
lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase )
else:
lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Dict = "cpu"
lowerCamelCase__ : str = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase )
lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
lowerCamelCase__ : List[str] = output.images
lowerCamelCase__ : Optional[Any] = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : str = np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
lowerCamelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCamelCase__ : Any = "A red cartoon frog, 4k"
lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowerCamelCase__ : str = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCamelCase__ : Optional[Any] = pipeline(
image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
lowerCamelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 5 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowercase ( __snake_case):
"""simple docstring"""
A__ = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A__ = "CIDAS/clipseg-rd64-refined"
A__ = "image_segmenter"
A__ = CLIPSegForImageSegmentation
A__ = ["image", "text"]
A__ = ["image"]
def __init__( self : int , *__lowerCamelCase : Dict , **__lowerCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=__UpperCamelCase , return_tensors="pt" )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : int ):
'''simple docstring'''
with torch.no_grad():
lowerCamelCase__ : Any = self.model(**__UpperCamelCase ).logits
return logits
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : str = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
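# --- usage sketch (added): assuming the upstream tool name
# `ImageSegmentationTool` for the class above, usage is roughly:
#   from PIL import Image
#   segmenter = ImageSegmentationTool()
#   mask = segmenter(Image.open("photo.png"), "cat")  # returns a PIL mask image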
| 701 |
def lowercase_ ( a : int , b : int ):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
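    # A quick worked check (added): 25 = 0b11001 and 32 = 0b100000,
    # so their bitwise XOR is 0b111001.
    assert lowercase_(25, 32) == "0b111001"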
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
A : Optional[Any] = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
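# With the `_LazyModule` indirection above, an import such as
# `from transformers.models.speecht5 import SpeechT5Config` resolves the name
# lazily, so the torch- and sentencepiece-backed submodules are only loaded on
# first attribute access.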
| 702 |
import os
from pathlib import Path
def lowercase_ ( ):
"""simple docstring"""
from torch.utils.cpp_extension import load
lowerCamelCase__ : Any = Path(_A ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
lowerCamelCase__ : Optional[int] = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , _A , with_cuda=_A , extra_include_paths=[str(_A )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 5 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A : Optional[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
A : Optional[Any] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
A : Optional[int] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"})
A__ = field(
default=_UpperCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
A__ = field(
default=_UpperCAmelCase , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
A__ = field(default=_UpperCAmelCase , metadata={"help": "A folder containing the training data."})
A__ = field(default=_UpperCAmelCase , metadata={"help": "A folder containing the validation data."})
A__ = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."})
A__ = field(default=32 , metadata={"help": "The size of the square patches to use for masking."})
A__ = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
A__ = field(
default=_UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A__ = field(
default=_UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Any = {}
if self.train_dir is not None:
lowerCamelCase__ : Any = self.train_dir
if self.validation_dir is not None:
lowerCamelCase__ : Tuple = self.validation_dir
lowerCamelCase__ : int = data_files if data_files else None
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(
default=_UpperCAmelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
)
} , )
A__ = field(
default=_UpperCAmelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_UpperCAmelCase)} , )
A__ = field(
default=_UpperCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
A__ = field(
default=_UpperCAmelCase , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
A__ = field(
default=_UpperCAmelCase , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
A__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
A__ = field(default=_UpperCAmelCase , metadata={"help": "Name or path of preprocessor config."})
A__ = field(
default=_UpperCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
A__ = field(
default=_UpperCAmelCase , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
A__ = field(
default=_UpperCAmelCase , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
A__ = field(
default=_UpperCAmelCase , metadata={"help": "Stride to use for the encoder."} , )
class _lowercase :
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : List[Any]=192 , __lowerCamelCase : Optional[int]=32 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : List[str]=0.6 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = input_size
lowerCamelCase__ : List[Any] = mask_patch_size
lowerCamelCase__ : Tuple = model_patch_size
lowerCamelCase__ : Dict = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size" )
lowerCamelCase__ : List[Any] = self.input_size // self.mask_patch_size
lowerCamelCase__ : List[str] = self.mask_patch_size // self.model_patch_size
lowerCamelCase__ : Optional[int] = self.rand_size**2
lowerCamelCase__ : Union[str, Any] = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = np.random.permutation(self.token_count )[: self.mask_count]
lowerCamelCase__ : Optional[Any] = np.zeros(self.token_count , dtype=lowercase__ )
lowerCamelCase__ : Optional[Any] = 1
lowerCamelCase__ : Any = mask.reshape((self.rand_size, self.rand_size) )
lowerCamelCase__ : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
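# --- usage sketch (added): using the upstream name `MaskGenerator` for the
# class above, as the training script below does, and its defaults
# (input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
#   mask_generator = MaskGenerator()
#   mask = mask_generator()                  # 1-D tensor of 0/1 patch flags
#   assert mask.shape[0] == (192 // 4) ** 2  # one flag per model patch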
def lowercase_ ( _A : Dict ):
"""simple docstring"""
lowerCamelCase__ : Any = torch.stack([example["pixel_values"] for example in examples] )
lowerCamelCase__ : Optional[int] = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase__ : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
lowerCamelCase__ : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
    lowerCamelCase__ : int = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE__ ) and data_args.train_val_split > 0.0:
lowerCamelCase__ : List[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
lowerCamelCase__ : List[str] = split["""train"""]
lowerCamelCase__ : List[str] = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    lowerCamelCase__ : Dict = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
if model_args.config_name_or_path:
lowerCamelCase__ : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
lowerCamelCase__ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ : Dict = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(SCREAMING_SNAKE_CASE__ , "decoder_type" ):
        lowerCamelCase__ : str = "simmim"
# adapt config
lowerCamelCase__ : Tuple = model_args.image_size if model_args.image_size is not None else config.image_size
lowerCamelCase__ : str = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowerCamelCase__ : List[Any] = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
lowerCamelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ : str = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowerCamelCase__ : Tuple = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowerCamelCase__ : List[Any] = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
lowerCamelCase__ : Optional[int] = AutoModelForMaskedImageModeling.from_config(SCREAMING_SNAKE_CASE__ )
if training_args.do_train:
        lowerCamelCase__ : str = ds["train"].column_names
    else:
        lowerCamelCase__ : Union[str, Any] = ds["validation"].column_names
if data_args.image_column_name is not None:
lowerCamelCase__ : Any = data_args.image_column_name
elif "image" in column_names:
        lowerCamelCase__ : str = "image"
    elif "img" in column_names:
        lowerCamelCase__ : Dict = "img"
else:
lowerCamelCase__ : Optional[int] = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowerCamelCase__ : List[str] = Compose(
[
Lambda(lambda _A : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowerCamelCase__ : Any = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(_A : List[Any] ):
lowerCamelCase__ : List[str] = [transforms(SCREAMING_SNAKE_CASE__ ) for image in examples[image_column_name]]
lowerCamelCase__ : Any = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
            lowerCamelCase__ : Optional[int] = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(SCREAMING_SNAKE_CASE__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
            lowerCamelCase__ : Union[str, Any] = (
                ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
ds["validation"].set_transform(SCREAMING_SNAKE_CASE__ )
# Initialize our trainer
lowerCamelCase__ : Tuple = Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
lowerCamelCase__ : Any = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase__ : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase__ : int = last_checkpoint
lowerCamelCase__ : List[Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase__ : List[Any] = trainer.evaluate()
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE__ )
# Write model card and (optionally) push to hub
    lowerCamelCase__ : Optional[Any] = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
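    # Example invocation (added; hypothetical output path):
    #   python run_mim.py --dataset_name cifar10 --output_dir ./simmim-out \
    #       --do_train --do_eval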
| 703 |
import os
from datetime import datetime as dt
from github import Github
A : Union[str, Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = Github(os.environ["GITHUB_TOKEN"] )
lowerCamelCase__ : str = g.get_repo("huggingface/diffusers" )
lowerCamelCase__ : Optional[int] = repo.get_issues(state="open" )
for issue in open_issues:
lowerCamelCase__ : str = sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A )
lowerCamelCase__ : str = comments[0] if len(_A ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 5 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Any=7 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : int=10 , __lowerCamelCase : Tuple=18 , __lowerCamelCase : List[Any]=30 , __lowerCamelCase : Optional[int]=400 , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[int]=None , ):
'''simple docstring'''
        lowerCamelCase__ : str = size if size is not None else {"shortest_edge": 18}
        lowerCamelCase__ : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCamelCase__ : int = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : Optional[int] = num_frames
lowerCamelCase__ : List[str] = image_size
lowerCamelCase__ : Tuple = min_resolution
lowerCamelCase__ : Optional[int] = max_resolution
lowerCamelCase__ : Optional[Any] = do_resize
lowerCamelCase__ : Optional[int] = size
lowerCamelCase__ : Optional[Any] = do_normalize
lowerCamelCase__ : str = image_mean
lowerCamelCase__ : List[str] = image_std
lowerCamelCase__ : Tuple = crop_size
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
"""simple docstring"""
A__ = VivitImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = VivitImageProcessingTester(self )
@property
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , "image_mean" ) )
self.assertTrue(hasattr(__snake_case , "image_std" ) )
self.assertTrue(hasattr(__snake_case , "do_normalize" ) )
self.assertTrue(hasattr(__snake_case , "do_resize" ) )
self.assertTrue(hasattr(__snake_case , "do_center_crop" ) )
self.assertTrue(hasattr(__snake_case , "size" ) )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
lowerCamelCase__ : Tuple = prepare_video_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case , __snake_case )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
lowerCamelCase__ : List[str] = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : Tuple = image_processing(__snake_case , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case , __snake_case )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : int = image_processing(__snake_case , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case , __snake_case )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Dict = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : Tuple = image_processing(__snake_case , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 704 |
from __future__ import annotations
def lowercase_ ( _A : str , _A : list[str] | None = None , _A : dict[str, float] | None = None , _A : bool = False , ):
"""simple docstring"""
lowerCamelCase__ : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCamelCase__ : Dict = {
"a": 0.08_497,
"b": 0.01_492,
"c": 0.02_202,
"d": 0.04_253,
"e": 0.11_162,
"f": 0.02_228,
"g": 0.02_015,
"h": 0.06_094,
"i": 0.07_546,
"j": 0.00_153,
"k": 0.01_292,
"l": 0.04_025,
"m": 0.02_406,
"n": 0.06_749,
"o": 0.07_507,
"p": 0.01_929,
"q": 0.00_095,
"r": 0.07_587,
"s": 0.06_327,
"t": 0.09_356,
"u": 0.02_758,
"v": 0.00_978,
"w": 0.02_560,
"x": 0.00_150,
"y": 0.01_994,
"z": 0.00_077,
}
else:
# Custom frequencies dictionary
lowerCamelCase__ : Optional[int] = frequencies_dict
if not case_sensitive:
lowerCamelCase__ : str = ciphertext.lower()
# Chi squared statistic values
lowerCamelCase__ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(_A ) ):
lowerCamelCase__ : Optional[Any] = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCamelCase__ : Dict = (alphabet_letters.index(letter.lower() ) - shift) % len(
_A )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCamelCase__ : str = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCamelCase__ : List[str] = letter.lower()
if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    lowerCamelCase__ : List[str] = decrypted_with_shift.lower().count(_A )
                    # Get the expected number of times the letter should appear based
# on letter frequencies
lowerCamelCase__ : List[Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ : str = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase__ : Any = decrypted_with_shift.count(_A )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCamelCase__ : str = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ : int = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCamelCase__ : Optional[int] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(_A : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
lowerCamelCase__ : int = min(
_A , key=_A , )
# Get all the data from the most likely cipher (key, decoded message)
    lowerCamelCase__ , lowerCamelCase__ : int = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
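# --- usage sketch (added): in its likely upstream source this helper is named
# `decrypt_caesar_with_chi_squared`; it returns a
# (most_likely_shift, chi_squared_value, decoded_text) triple, e.g.:
#   shift, chi_squared, decoded = lowercase_("dpnqvufs tdjfodf")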
| 5 | 0 |
import math
from collections.abc import Callable
def intersection ( function : Callable[[float], float] , xa : float , xb : float ):
    """simple docstring"""
    x_n = xa
    x_na = xb
    while True:
        if x_n == x_na or function(x_na ) == function(x_n ):
            raise ZeroDivisionError("float division by zero, could not find root" )
        x_nb = x_na - (
            function(x_na ) / ((function(x_na ) - function(x_n )) / (x_na - x_n))
        )
        if abs(x_nb - x_na ) < 10**-5:
            return x_nb
        x_n = x_na
        x_na = x_nb
def f ( x : float ):
    """simple docstring"""
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
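    # The printed root of x**3 - 2*x - 5 = 0 is approximately 2.0945514815.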
| 705 |
def lowercase_ ( number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
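    # Worked checks (added): 25**2 = 625 ends in 25, while 7**2 = 49 does not end in 7.
    assert lowercase_(25)
    assert not lowercase_(7)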
| 5 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _lowercase ( _UpperCamelCase):
"""simple docstring"""
A__ = "vit_mae"
def __init__( self : List[Any] , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=3072 , __lowerCamelCase : int="gelu" , __lowerCamelCase : str=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : Any=224 , __lowerCamelCase : Dict=16 , __lowerCamelCase : int=3 , __lowerCamelCase : str=True , __lowerCamelCase : Dict=16 , __lowerCamelCase : Optional[int]=512 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Optional[int]=2048 , __lowerCamelCase : Tuple=0.7_5 , __lowerCamelCase : Optional[Any]=False , **__lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(**__a )
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Tuple = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : str = layer_norm_eps
lowerCamelCase__ : int = image_size
lowerCamelCase__ : Tuple = patch_size
lowerCamelCase__ : str = num_channels
lowerCamelCase__ : List[str] = qkv_bias
lowerCamelCase__ : List[str] = decoder_num_attention_heads
lowerCamelCase__ : Any = decoder_hidden_size
lowerCamelCase__ : Optional[int] = decoder_num_hidden_layers
lowerCamelCase__ : Optional[Any] = decoder_intermediate_size
lowerCamelCase__ : str = mask_ratio
lowerCamelCase__ : Optional[Any] = norm_pix_loss
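# --- usage sketch (added): upstream this configuration class is `ViTMAEConfig`;
# with the defaults above:
#   config = ViTMAEConfig()
#   assert config.hidden_size == 768 and config.mask_ratio == 0.75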
| 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
A : Optional[int] = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
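# Usage note (sketch): with _LazyModule in place, `from transformers import
# SpeechT5Config` resolves lazily; the torch-backed modeling submodule is only
# imported on first attribute access, which keeps package import time low.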
| 5 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n            This includes models such as gpt2, causal variations of bert,\n            causal versions of t5, and more (the full list can be found\n            in the AutoModelForCausalLM documentation here:\n            https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    """simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        '''simple docstring'''
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
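            # The next line computes 2 ** (masked mean of the per-token cross-entropy):
            # pad positions are zeroed out by the shifted attention mask, and each
            # sequence is normalised by its own number of real tokens.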
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_snake_case )}
| 707 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
"""simple docstring"""
    def __init__(
        self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]], ):
        '''simple docstring'''
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        '''simple docstring'''
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources(self) -> list[int]:
        '''simple docstring'''
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )
    def __need(self) -> list[list[int]]:
        '''simple docstring'''
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self) -> dict[int, list[int]]:
        '''simple docstring'''
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        '''simple docstring'''
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        '''simple docstring'''
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
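    # A minimal usage sketch with the sample tables above: run the Banker's
    # safety check verbosely (main prints each process as it executes).
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)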
| 5 | 0 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """simple docstring"""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
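# A small worked example (the input length must be a power of two): for
# [3, 1, 4, 2] with direction=1, the two recursive sorts produce the bitonic
# sequence [1, 3, 4, 2], and bitonic_merge then compare-and-swaps elements
# `middle` apart to reach [1, 2, 3, 4].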
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
'''simple docstring'''
super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
    def test_prepare_batch(self):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        expected_encoding = lowerCamelCase__  # bind the encoding dict above to a readable name
        # fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences, )
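# Running note (sketch): the whole class above is marked @slow, so these tests
# only execute when RUN_SLOW=1 is set, e.g. `RUN_SLOW=1 python -m pytest -k barthez`.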
| 5 | 0 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """simple docstring"""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    """simple docstring"""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
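    # A quick worked example, the 3-4-5 power triangle at 100 VA and pf = 0.8:
    print(real_power(100, 0.8))  # 80.0 W
    print(reactive_power(100, 0.8))  # ~60.0 var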
| 709 |
import cv2
import numpy as np
class HarrisCorner:
"""simple docstring"""
    def __init__(self, k: float, window_size: int):
        '''simple docstring'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
def __str__( self : str ):
'''simple docstring'''
return str(self.k )
    def detect(self, img_path: str):
        '''simple docstring'''
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
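    # Response recap for the loop above: with the windowed structure tensor
    # M = [[wxx, wxy], [wxy, wyy]], r = det(M) - k * trace(M)**2 is strongly
    # positive at corners, negative along edges, and near zero in flat regions.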
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 5 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A : Dict = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
'facebook/s2t-small-librispeech-asr': 1024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        '''simple docstring'''
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        '''simple docstring'''
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        '''simple docstring'''
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token) -> int:
        '''simple docstring'''
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False, ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> dict:
        '''simple docstring'''
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: dict) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    """simple docstring"""
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """simple docstring"""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
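# A minimal usage sketch (hypothetical local files): build the tokenizer from a
# vocab and SentencePiece model, pick a MuST-C target language, then encode.
# tokenizer = Speech2TextTokenizer("vocab.json", "sentencepiece.bpe.model",
#                                  lang_codes="mustc", tgt_lang="fr")
# ids = tokenizer("bonjour").input_ids  # prefixed with the <lang:fr> token id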
| 710 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
"""simple docstring"""
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
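# Running note (sketch): the integration test above downloads the albert-base-v2
# checkpoint, so it is gated behind @slow and only runs when RUN_SLOW=1 is set.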
| 5 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]):
    """simple docstring"""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]):
    """simple docstring"""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]] = None, end_edges: Optional[Sequence[bool]] = None, ):
    """simple docstring"""
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path = tuple(path_list)
    divergence_idx = len(path_list)
    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )
    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )
    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())
    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int):
    """simple docstring"""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False, ):
    """simple docstring"""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t
    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class ChunkSizeTuner:
    """simple docstring"""
    def __init__(self, max_chunk_size: int = 512, ):
        '''simple docstring'''
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int):
        '''simple docstring'''
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable):
        '''simple docstring'''
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int, ):
        '''simple docstring'''
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
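if __name__ == "__main__":
    # A minimal self-contained sketch: run a per-row transform over a (6, 4) batch
    # in chunks of 2 rows and check it matches the unchunked computation.
    demo_inputs = {"x": torch.arange(24.0).view(6, 4)}
    demo_out = chunk_layer(lambda x: {"out": x * 2}, demo_inputs, chunk_size=2, no_batch_dims=1)
    assert torch.equal(demo_out["out"], demo_inputs["x"] * 2)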
| 711 |
import os
def solution(filename: str = "input.txt") -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
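# Recap of the three sweeps above for each column j: seed each cell from its left
# neighbour, then relax top-down and bottom-up so a path may also enter a cell
# from above or below, the up/down/right moves allowed in Project Euler 82.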
if __name__ == "__main__":
print(f'{solution() = }')
| 5 | 0 |
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
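# A quick worked example: p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"].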
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 712 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n   author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n   title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n   booktitle = {},\n   year = {2002},\n   pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n    title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n    author = \"Lin, Chin-Yew  and\n      Och, Franz Josef\",\n    booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n    month = \"aug 23{--}aug 27\",\n    year = \"2004\",\n    address = \"Geneva, Switzerland\",\n    publisher = \"COLING\",\n    url = \"https://www.aclweb.org/anthology/C04-1072\",\n    pages = \"501--507\",\n}\n"
_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _lowercase ( datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
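# A minimal sketch of calling the vendored scorer directly (assuming nmt_bleu is
# importable as above); compute_bleu returns the six-tuple unpacked in _compute:
references = [[["hello", "there", "general", "kenobi"]]]  # one reference per sample
translations = [["hello", "there", "general", "kenobi"]]
bleu, precisions, bp, ratio, translation_length, reference_length = compute_bleu(
    reference_corpus=references, translation_corpus=translations, max_order=4, smooth=False
)
assert bleu == 1.0  # identical hypothesis and reference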
| 5 | 0 |
import math
import sys
def num_squares(number: int) -> int:
    """Return the least number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)  # answers[i] = least number of squares summing to i
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
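    # Worked examples (hypothetical calls): 12 = 4 + 4 + 4 needs three squares,
    # while 13 = 9 + 4 needs only two.
    assert num_squares(12) == 3
    assert num_squares(13) == 2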
| 713 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A : Optional[int] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
A : int = BeautifulSoup(res.text, "html.parser")
A : Any = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : Tuple = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = ["DeiTFeatureExtractor"]
A : Tuple = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
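# The _LazyModule above defers the heavy framework imports until an attribute is
# first accessed. A minimal sketch of the same idea via module-level __getattr__
# (PEP 562), shown only to illustrate the pattern -- the attribute->submodule map
# here is an assumption, not transformers' real implementation:
import importlib

_LAZY_ATTRS = {"DeiTConfig": "configuration_deit"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module("." + _LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")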
| 714 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32 )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32 )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 5 | 0 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity of two sets (or lists/tuples)."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
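    # Worked numbers for the sets above: the intersection {'c', 'd', 'e'} has
    # size 3 and the union has size 8, so the similarity is 3 / 8 = 0.375; with
    # alternative_union=True the denominator is len(A) + len(B) = 11 instead.
    assert jaccard_similarity(set_a, set_b) == 0.375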
| 715 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Any = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
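# The dummy past_key_values built above are zero tensors shaped
# (batch, num_heads, past_sequence_length, hidden_size // num_heads), one
# key/value pair per layer. A standalone sketch with assumed sizes:
import torch

batch, num_heads, past_len, hidden_size, num_layers = 2, 16, 7, 512, 8  # assumed sizes
past_shape = (batch, num_heads, past_len, hidden_size // num_heads)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)]
assert past_key_values[0][0].shape == (2, 16, 7, 32)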
| 5 | 0 |
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(f'The generated key is : {bbaa(8, seed=0)}')
from doctest import testmod
testmod()
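    # The sifted string keeps only the positions where Alice's and Bob's random
    # bases agree (about half in expectation), so 6 * key_len qubits leave ample
    # margin over the key_len bits returned. A deterministic usage example
    # (assumes qiskit with the Aer provider is installed):
    key = bbaa(8, seed=0)
    assert len(key) == 8 and set(key) <= {"0", "1"}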
| 716 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[int] = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
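# A small usage sketch for the config above (names follow this file; the values
# simply exercise the adapter-related defaults):
config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
assert config.adapter_reduction_factor == 2
assert config.languages == ["en_XX", "de_DE"]  # __init__ stores the tuple as a list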
| 5 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
A : List[str] = datasets.logging.get_logger(__name__)
A : Union[str, Any] = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
A : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
A : Tuple = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _lowercase ( datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : Union[str, Any] = is_training
lowerCamelCase__ : Any = use_token_type_ids
lowerCamelCase__ : Union[str, Any] = use_labels
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Optional[int] = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : List[Any] = num_choices
lowerCamelCase__ : Optional[Any] = scope
lowerCamelCase__ : List[Any] = self.vocab_size - 1
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Any = None
lowerCamelCase__ : str = None
lowerCamelCase__ : str = None
if self.use_labels:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase )
lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.num_labels
lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ):
'''simple docstring'''
lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , )
lowerCamelCase__ : Tuple = inputs_dict["labels"]
lowerCamelCase__ : Any = inputs_dict["labels"]
lowerCamelCase__ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 5 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
A : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
A : Tuple = {"target_lang": "fi", "source_lang": "en"}
A : Any = ">>zh<<"
A : List[str] = "Helsinki-NLP/"
if is_torch_available():
A : Optional[int] = "pt"
elif is_tf_available():
A : str = "tf"
else:
A : Optional[int] = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : Any = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
lowerCamelCase__ : Union[str, Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
lowerCamelCase__ : Any = Path(self.tmpdirname )
save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["target_spm"] )
lowerCamelCase__ : Any = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Dict = """</s>"""
lowerCamelCase__ : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(_lowercase ) , 9 )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
lowerCamelCase__ : Optional[Any] = en_de_tokenizer(["I am a small frog"] , return_tensors=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowerCamelCase__ : Optional[int] = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(_lowercase , batch.input_ids[0] )
lowerCamelCase__ : str = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_lowercase )
lowerCamelCase__ : Any = [x.name for x in Path(_lowercase ).glob("*" )]
self.assertIn("source.spm" , _lowercase )
MarianTokenizer.from_pretrained(_lowercase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Optional[int] = tok(
["I am a small frog" * 1000, "I am a small frog"] , padding=_lowercase , truncation=_lowercase , return_tensors=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : Dict = tok(["I am a tiny frog", "I am a small frog"] , padding=_lowercase , return_tensors=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : int = {"""input_ids""": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
lowerCamelCase__ : Optional[int] = """Tämä on testi"""
lowerCamelCase__ : Tuple = """This is a test"""
lowerCamelCase__ : List[Any] = [76, 7, 2047, 2]
lowerCamelCase__ : List[Any] = [69, 12, 11, 940, 2]
lowerCamelCase__ : Union[str, Any] = tokenizer(_lowercase ).input_ids
self.assertListEqual(_lowercase , _lowercase )
lowerCamelCase__ : Optional[int] = tokenizer(text_target=_lowercase ).input_ids
self.assertListEqual(_lowercase , _lowercase )
lowerCamelCase__ : Tuple = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
| 718 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Dict = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
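# Usage sketch for the quantization switches defined above (names follow this file):
config = IBertConfig(quant_mode=True, force_dequant="gelu")
assert config.quant_mode is True and config.force_dequant == "gelu"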
| 5 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
def __init__( self : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : List[str]=18 , __lowerCamelCase : Union[str, Any]=30 , __lowerCamelCase : Optional[int]=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=None , __lowerCamelCase : str=True , ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = size if size is not None else {"""height""": 18, """width""": 18}
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : Optional[int] = batch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : Tuple = image_size
lowerCamelCase__ : List[str] = min_resolution
lowerCamelCase__ : Union[str, Any] = max_resolution
lowerCamelCase__ : Tuple = do_resize
lowerCamelCase__ : Optional[int] = size
lowerCamelCase__ : Any = apply_ocr
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Tuple = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , "do_resize" ) )
self.assertTrue(hasattr(A__ , "size" ) )
self.assertTrue(hasattr(A__ , "apply_ocr" ) )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
lowerCamelCase__ : int = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , A__ )
self.assertIsInstance(encoding.boxes , A__ )
# Test batched
lowerCamelCase__ : Dict = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
lowerCamelCase__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase__ : List[Any] = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase__ : List[Any] = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase__ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowerCamelCase__ : str = Image.open(ds[0]["file"] ).convert("RGB" )
lowerCamelCase__ : Tuple = image_processing(A__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase__ : Any = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
lowerCamelCase__ : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A__ )
self.assertListEqual(encoding.boxes , A__ )
# with apply_OCR = False
lowerCamelCase__ : str = LayoutLMvaImageProcessor(apply_ocr=A__ )
lowerCamelCase__ : str = image_processing(A__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
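# A minimal usage sketch (not part of the test above; "document.png" is a
# hypothetical local file, and the class name follows this file's
# LayoutLMvaImageProcessor spelling):
#
#   processor = LayoutLMvaImageProcessor(apply_ocr=True)
#   enc = processor(Image.open("document.png").convert("RGB") , return_tensors="pt" )
#   enc.pixel_values.shape  # (1, 3, 224, 224); enc.words / enc.boxes hold the OCR output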
| 719 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
A : Union[str, Any] = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class _lowercase ( PretrainedConfig):
"""simple docstring"""
A__ = "roberta"
def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : List[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : Dict = layer_norm_eps
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : Any = use_cache
lowerCamelCase__ : int = classifier_dropout
class _lowercase ( OnnxConfig):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
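# A minimal sketch (assumption: the first _lowercase class above is the RoBERTa
# config, RobertaConfig in unmangled transformers code; the "roberta" class
# attribute plays the role of model_type):
#
#   config = RobertaConfig(vocab_size=50265 , num_hidden_layers=12 )
#   config.max_position_embeddings  # 512 by default, per __init__ above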
| 5 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowercase ( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
A__ = WavaVecaPhonemeCTCTokenizer
A__ = False
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : str = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
lowerCamelCase__ : List[Any] = dict(zip(_a , range(len(_a ) ) ) )
lowerCamelCase__ : int = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
lowerCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_a ) + "\n" )
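        # The vocab file written above is a plain JSON mapping from token to id,
        # e.g. {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "n": 4, ...}, since
        # ids are assigned in enumeration order of the phoneme list.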
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Union[str, Any]=20 , __lowerCamelCase : Dict=5 ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_a )) for i in range(len(_a ) )]
lowerCamelCase__ : Optional[int] = list(filter(lambda __lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_a ) , _a ) )
if max_length is not None and len(_a ) > max_length:
lowerCamelCase__ : Optional[int] = toks[:max_length]
if min_length is not None and len(_a ) < min_length and len(_a ) > 0:
while len(_a ) < min_length:
lowerCamelCase__ : Union[str, Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : Optional[int] = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Any = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
if " " not in output_txt and len(_a ) > 1:
lowerCamelCase__ : str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_a )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_a )
)
if with_prefix_space:
lowerCamelCase__ : Any = " " + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
return output_txt, output_ids
def lowerCAmelCase ( self : int , **__lowerCamelCase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_a )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
lowerCamelCase__ : Union[str, Any] = tokenizer("m xxx ɪ" , do_phonemize=_a ).input_ids
self.assertEqual(_a , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
lowerCamelCase__ : Optional[int] = tokenizer("m aaa ɪ ccc" , do_phonemize=_a ).input_ids
self.assertEqual(_a , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
lowerCamelCase__ : Dict = tokenizer("maɪ c" , do_phonemize=_a ).input_ids
self.assertEqual(_a , [3, 200] ) # mai should be <unk> (=3)
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : int = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCamelCase__ : List[str] = "Hello how are you"
lowerCamelCase__ : Optional[Any] = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
self.assertEqual(_a , "h ə l oʊ h aʊ ɑːɹ j uː" )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCamelCase__ : str = "Hello how are you"
lowerCamelCase__ : str = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(_a ).input_ids , tokenizer(_a , do_phonemize=_a ).input_ids )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCamelCase__ : Optional[Any] = "Hello how are you"
lowerCamelCase__ : List[Any] = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
lowerCamelCase__ : List[str] = tokenizer.decode(tokenizer(_a ).input_ids )
self.assertEqual(_a , _a )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCamelCase__ : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCamelCase__ : int = tokenizer.decode(sample_ids[0] )
lowerCamelCase__ : List[str] = tokenizer.batch_decode(_a )
self.assertEqual(_a , batch_tokens[0] )
self.assertEqual(_a , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCamelCase__ : Tuple = "Hello how are you"
lowerCamelCase__ : Optional[Any] = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
self.assertEqual(_a , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCamelCase__ : str = "Hello how are you"
lowerCamelCase__ : List[Any] = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(_a ).input_ids , tokenizer(_a , do_phonemize=_a ).input_ids )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
lowerCamelCase__ : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCamelCase__ : List[Any] = tokenizer.decode(sample_ids[0] )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(_a )
self.assertEqual(_a , batch_tokens[0] )
self.assertEqual(_a , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
lowerCamelCase__ : int = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_a )
lowerCamelCase__ : List[Any] = tokenizer.batch_decode(_a , filter_word_delimiter_token=_a )
self.assertEqual(_a , batch_tokens[0] )
self.assertEqual(_a , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCamelCase__ : Any = "Hello how are you"
lowerCamelCase__ : Union[str, Any] = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
lowerCamelCase__ : List[str] = tokenizer.decode(tokenizer(_a ).input_ids , filter_word_delimiter_token=_a )
self.assertEqual(_a , _a )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCamelCase__ : str = "Hello how are you"
lowerCamelCase__ : Optional[int] = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
lowerCamelCase__ : List[str] = tokenizer.decode(tokenizer(_a ).input_ids , filter_word_delimiter_token=_a )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , _a )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=_a )
lowerCamelCase__ : Optional[int] = "Hello how are you"
lowerCamelCase__ : Tuple = tokenizer(_a , phonemizer_lang="en-us" ).input_ids
lowerCamelCase__ : int = tokenizer(_a , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(_a , _a )
lowerCamelCase__ : int = tokenizer.decode(_a )
lowerCamelCase__ : int = tokenizer.decode(_a )
self.assertEqual(_a , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(_a , "ɛ l o h aʊ a ʁ j u" )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCamelCase__ : Optional[Any] = "Hello how Are you"
lowerCamelCase__ : Optional[int] = "hello how are you"
lowerCamelCase__ : Dict = tokenizer(_a ).input_ids
lowerCamelCase__ : Tuple = tokenizer(_a ).input_ids
self.assertEqual(_a , _a )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
lowerCamelCase__ : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCamelCase__ : int = tokenizer.batch_decode(_a )
self.assertEqual(_a , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : int = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
lowerCamelCase__ : Any = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCamelCase__ : List[str] = tokenizer.decode(_a , output_char_offsets=_a , filter_word_delimiter_token=_a )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(_a , _a ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Any = self.get_tokenizer(word_delimiter_token="|" )
def check_list_tuples_equal(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : int ):
self.assertTrue(isinstance(_a , _a ) )
self.assertTrue(isinstance(outputs_list[0] , _a ) )
# transform list to ModelOutput
lowerCamelCase__ : Union[str, Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
def recursive_check(__lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
if isinstance(_a , _a ):
[recursive_check(_a , _a ) for la, la in zip(_a , _a )]
self.assertEqual(_a , _a )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
# fmt: off
lowerCamelCase__ : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(_a , output_char_offsets=_a )
lowerCamelCase__ : Union[str, Any] = [tokenizer.decode(_a , output_char_offsets=_a ) for ids in sample_ids]
check_list_tuples_equal(_a , _a )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase__ : Dict = tokenizer.vocab_size
lowerCamelCase__ : List[Any] = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCamelCase__ : str = ["aaaaa bbbbbb", "cccccccccdddddddd"]
lowerCamelCase__ : Optional[int] = tokenizer.add_tokens(_a )
lowerCamelCase__ : int = tokenizer.vocab_size
lowerCamelCase__ : Any = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
lowerCamelCase__ : Union[str, Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCamelCase__ : Dict = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
lowerCamelCase__ : Optional[Any] = tokenizer.add_special_tokens(_a )
lowerCamelCase__ : Any = tokenizer.vocab_size
lowerCamelCase__ : Any = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
lowerCamelCase__ : Tuple = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode." )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip("The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode." )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.get_tokenizers(fast=_a , do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase__ : Dict = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
lowerCamelCase__ : Dict = tokenizer.convert_tokens_to_string(_a )
self.assertIsInstance(output["text"] , _a )
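# Sketch of offset-aware decoding with this tokenizer (ids are illustrative):
#
#   out = tokenizer.decode(sample_ids , output_char_offsets=True )
#   out.text          # the decoded phoneme string
#   out.char_offsets  # one {"char", "start_offset", "end_offset"} dict per phoneme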
| 720 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments :
"""simple docstring"""
    A__ = field(
        default=None , metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    A__ = field(
        default=None , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
A__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
A__ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
A__ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
    A__ = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"})
    A__ = field(
        default=False , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
A__ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"})
class Split ( Enum):
    """simple docstring"""
    train = "train"
    dev = "dev"
class _lowercase ( Dataset):
"""simple docstring"""
A__ = 42
A__ = 42
A__ = 42
A__ = 42
def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ):
'''simple docstring'''
lowerCamelCase__ : List[str] = args
lowerCamelCase__ : Tuple = is_language_sensitive
        lowerCamelCase__ : int = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
lowerCamelCase__ : List[str] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase__ : str = mode
# Load data features from cache or dataset file
lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase__ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : List[str] = cached_features_file + ".lock"
with FileLock(__lowerCamelCase ):
if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache:
lowerCamelCase__ : str = time.time()
lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase__ : Optional[Any] = self.old_features["features"]
lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
if mode == Split.dev:
lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir )
lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , )
lowerCamelCase__ : int = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.features[i]
lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase__ : List[str] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
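# A minimal usage sketch (values are illustrative; the dataset class above is
# named _lowercase here, SquadDataset in unmangled transformers code, and
# tokenizer is any PreTrainedTokenizer):
#
#   args = SquadDataTrainingArguments(model_type="bert" , data_dir="squad_data" )
#   train_dataset = SquadDataset(args , tokenizer , mode=Split.train )
#   inputs = train_dataset[0]  # dict of tensors, incl. start/end positions in train mode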
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["""PoolFormerFeatureExtractor"""]
    _import_structure["image_processing_poolformer"] = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
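# Note: with the _LazyModule indirection above, importing this package is cheap;
# heavy submodules such as modeling_poolformer are only imported the first time
# one of their attributes (e.g. PoolFormerModel) is actually accessed.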
| 721 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ):
    """simple docstring"""
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
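# e.g. printable bytes map to themselves (b"A" -> "A"), while bytes the BPE code
# would choke on are shifted past 255: the space byte 0x20 becomes "Ġ" (U+0120),
# which is why GPT-2-style vocabularies spell word-initial tokens with a leading "Ġ".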
def get_pairs( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
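# e.g. get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}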
class _lowercase ( PreTrainedTokenizer):
"""simple docstring"""
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ["input_ids", "attention_mask"]
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase )
lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding
lowerCamelCase__ : List[Any] = bytes_to_unicode()
lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1]
lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase )
lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram
lowerCamelCase__ : int = []
lowerCamelCase__ : int = 0
while i < len(__lowerCamelCase ):
try:
lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : List[str] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : Dict = tuple(__lowerCamelCase )
lowerCamelCase__ : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase )
lowerCamelCase__ : Dict = word
return word
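    # Illustration of the merge loop above (merge ranks are illustrative): with
    # bpe_ranks {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") rewrites
    # ("l", "o", "w") -> ("lo", "w") -> ("low",) and returns "low",
    # memoizing the result in self.cache.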
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = []
for token in re.findall(self.pat , __lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase ( self : Any , __lowerCamelCase : int ):
'''simple docstring'''
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.decoder.get(__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase )
lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase__ : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ : Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
lowerCamelCase__ : Tuple = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
lowerCamelCase__ : List[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
lowerCamelCase__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowerCamelCase__ : Any = [self.sep_token_id]
lowerCamelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
lowerCamelCase__ : Dict = " " + text
return (text, kwargs)
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ : str = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]]
        # `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ : Optional[int] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
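# Sketch of the global-attention padding above (values are illustrative): an
# encoding padded from length 3 to 5 on the right turns
#   {"input_ids": [0, 713, 2], "global_attention_mask": [1, 0, 0]}
# into a global_attention_mask of [1, 0, 0, -1, -1], since -1 marks padded
# positions (0 already means plain local attention).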
| 5 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A : List[Any] = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class _lowercase ( PretrainedConfig):
"""simple docstring"""
A__ = """albert"""
def __init__( self : int , __lowerCamelCase : Dict=30000 , __lowerCamelCase : str=128 , __lowerCamelCase : Any=4096 , __lowerCamelCase : Dict=12 , __lowerCamelCase : int=1 , __lowerCamelCase : Union[str, Any]=64 , __lowerCamelCase : Any=16384 , __lowerCamelCase : int=1 , __lowerCamelCase : str="gelu_new" , __lowerCamelCase : Any=0 , __lowerCamelCase : Dict=0 , __lowerCamelCase : str=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Any="absolute" , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Tuple=3 , **__lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : List[str] = embedding_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_hidden_groups
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = inner_group_num
lowerCamelCase__ : Tuple = hidden_act
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : List[Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : int = max_position_embeddings
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : List[Any] = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
lowerCamelCase__ : List[str] = position_embedding_type
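# Note on the defaults above: ALBERT shares weights across layers, so the
# num_hidden_layers forward passes reuse only num_hidden_groups distinct
# parameter sets (inner_group_num layers each), and embedding_size (128) is
# factorized separately from hidden_size (4096).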
class _lowercase ( OnnxConfig):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 700 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
A__ = KandinskyVaaImgaImgPipeline
A__ = ["image_embeds", "negative_image_embeds", "image"]
A__ = [
"image_embeds",
"negative_image_embeds",
"image",
]
A__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
A__ = False
@property
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = {
"in_channels": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.dummy_unet
lowerCamelCase__ : Optional[Any] = self.dummy_movq
lowerCamelCase__ : Optional[int] = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int=0 ):
'''simple docstring'''
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create init_image
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase__ : Optional[int] = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) )
if str(__lowerCamelCase ).startswith("mps" ):
lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase )
else:
lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Dict = "cpu"
lowerCamelCase__ : str = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase )
lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
lowerCamelCase__ : List[str] = output.images
lowerCamelCase__ : Optional[Any] = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : str = np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
lowerCamelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCamelCase__ : Any = "A red cartoon frog, 4k"
lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowerCamelCase__ : str = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCamelCase__ : Optional[Any] = pipeline(
image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
lowerCamelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
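# The two-stage flow above is the standard Kandinsky 2.2 pattern: the prior
# pipeline maps the text prompt to image embeddings, and the decoder pipeline
# consumes those embeddings together with the init image to produce the final
# 768x768 output that is compared against the reference array.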
| 5 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _lowercase :
"""simple docstring"""
@staticmethod
def lowerCAmelCase ( *__lowerCamelCase : str , **__lowerCamelCase : Optional[int] ):
'''simple docstring'''
pass
def lowercase_ ( _A : Image ):
"""simple docstring"""
    lowerCamelCase__ : Any = hashlib.md5(image.tobytes() )
return m.hexdigest()
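# The digest above is only a cheap fingerprint of the raw pixel buffer, used to
# compare pipeline outputs across test runs; it plays no security role here.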
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
A__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = DepthEstimationPipeline(model=lowercase__ , image_processor=lowercase__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any ):
'''simple docstring'''
lowerCamelCase__ : int = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , lowercase__ )
import datasets
lowerCamelCase__ : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
lowerCamelCase__ : Optional[Any] = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , lowercase__ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Dict = "Intel/dpt-large"
lowerCamelCase__ : int = pipeline("depth-estimation" , model=lowercase__ )
lowerCamelCase__ : Tuple = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
lowerCamelCase__ : Union[str, Any] = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_6_2 )
@require_torch
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 701 |
def lowercase_ ( _A : int , _A : int ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
lowerCamelCase__ : List[str] = str(bin(_A ) )[2:] # remove the leading "0b"
lowerCamelCase__ : List[Any] = str(bin(_A ) )[2:] # remove the leading "0b"
lowerCamelCase__ : List[Any] = max(len(_A ) , len(_A ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_A ) , b_binary.zfill(_A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
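# Worked example (`lowercase_` is this file's obfuscated name for the binary-XOR
# helper): lowercase_(25, 32) returns "0b111001", since 011001 ^ 100000, with
# both operands zero-padded to the same width, is 111001.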
| 5 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _lowercase :
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : str = "cpu" , __lowerCamelCase : str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = device
lowerCamelCase__ : Any = CLIPTokenizerFast.from_pretrained(__lowerCamelCase )
lowerCamelCase__ : Tuple = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
lowerCamelCase__ : Optional[Any] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
lowerCamelCase__ : List[str] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
lowerCamelCase__ : str = torchvision.transforms.Resize(224 )
lowerCamelCase__ : Union[str, Any] = torchvision.transforms.CenterCrop(224 )
def lowerCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : int = self.resize(__lowerCamelCase )
lowerCamelCase__ : Tuple = self.center_crop(__lowerCamelCase )
lowerCamelCase__ : Any = self.normalize(__lowerCamelCase )
return images
def __call__( self : Union[str, Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Any=None , **__lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.tokenizer(text=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Optional[int] = self.preprocess_img(__lowerCamelCase )
lowerCamelCase__ : Any = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _lowercase ( nn.Module):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : List[Any]=10 , __lowerCamelCase : Optional[int]=0.0_1 , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]="image" , __lowerCamelCase : Tuple=True , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : int=False , ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[int] = device if device else get_device()
if vqgan:
lowerCamelCase__ : Any = vqgan
else:
lowerCamelCase__ : Dict = load_vqgan(self.device , conf_path=__lowerCamelCase , ckpt_path=__lowerCamelCase )
self.vqgan.eval()
if clip:
lowerCamelCase__ : Dict = clip
else:
lowerCamelCase__ : Optional[Any] = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
lowerCamelCase__ : Any = ProcessorGradientFlow(device=self.device )
lowerCamelCase__ : Any = iterations
lowerCamelCase__ : Union[str, Any] = lr
lowerCamelCase__ : Tuple = log
lowerCamelCase__ : int = make_grid
lowerCamelCase__ : Optional[Any] = return_val
lowerCamelCase__ : List[Any] = quantize
lowerCamelCase__ : Optional[Any] = self.vqgan.decoder.z_shape
def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : int=5 , __lowerCamelCase : Dict=True ):
'''simple docstring'''
lowerCamelCase__ : Any = []
if output_path is None:
lowerCamelCase__ : Optional[int] = './animation.gif'
if input_path is None:
lowerCamelCase__ : List[str] = self.save_path
lowerCamelCase__ : Tuple = sorted(glob(input_path + "/*" ) )
if not len(__lowerCamelCase ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(__lowerCamelCase ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
lowerCamelCase__ : List[Any] = total_duration / len(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = [frame_duration] * len(__lowerCamelCase )
if extend_frames:
lowerCamelCase__ : Union[str, Any] = 1.5
lowerCamelCase__ : Union[str, Any] = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(__lowerCamelCase ) )
imageio.mimsave(__lowerCamelCase , __lowerCamelCase , duration=__lowerCamelCase )
print(f"gif saved to {output_path}" )
def lowerCAmelCase ( self : str , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
lowerCamelCase__ : Any = preprocess(Image.open(__lowerCamelCase ) , target_image_size=256 ).to(self.device )
lowerCamelCase__ : str = preprocess_vqgan(__lowerCamelCase )
lowerCamelCase__ : List[Any] = self.vqgan.encode(__lowerCamelCase )
return z
def lowerCAmelCase ( self : str , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.latent.detach().requires_grad_()
lowerCamelCase__ : Optional[int] = base_latent + transform_vector
if self.quantize:
lowerCamelCase__ : Dict = self.vqgan.quantize(__lowerCamelCase )
else:
lowerCamelCase__ : int = trans_latent
return self.vqgan.decode(__lowerCamelCase )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.clip_preprocessor(text=__lowerCamelCase , images=__lowerCamelCase , return_tensors="pt" , padding=__lowerCamelCase )
lowerCamelCase__ : int = self.clip(**__lowerCamelCase )
lowerCamelCase__ : Optional[int] = clip_outputs.logits_per_image
if weights is not None:
lowerCamelCase__ : List[str] = similarity_logits * weights
return similarity_logits.sum()
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._get_clip_similarity(pos_prompts["prompts"] , __lowerCamelCase , weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
lowerCamelCase__ : Optional[Any] = self._get_clip_similarity(neg_prompts["prompts"] , __lowerCamelCase , weights=neg_prompts["weights"] )
else:
lowerCamelCase__ : str = torch.tensor([1] , device=self.device )
lowerCamelCase__ : Dict = -torch.log(__lowerCamelCase ) + torch.log(__lowerCamelCase )
return loss
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Any = torch.randn_like(self.latent , requires_grad=__lowerCamelCase , device=self.device )
lowerCamelCase__ : Tuple = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
lowerCamelCase__ : Optional[Any] = self._add_vector(__lowerCamelCase )
lowerCamelCase__ : List[Any] = loop_post_process(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = self._get_CLIP_loss(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
print("CLIP loss" , __lowerCamelCase )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=__lowerCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
wandb.init(reinit=__lowerCamelCase , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
lowerCamelCase__ : Optional[Any] = Image.open(__lowerCamelCase )
lowerCamelCase__ : Dict = image.resize((256, 256) )
wandb.log("Original Image" , wandb.Image(__lowerCamelCase ) )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[Any] ):
'''simple docstring'''
if not prompts:
return []
lowerCamelCase__ : Tuple = []
lowerCamelCase__ : int = []
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : Tuple = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(__lowerCamelCase , (tuple, list) ):
lowerCamelCase__ : Any = prompt[0]
lowerCamelCase__ : List[str] = float(prompt[1] )
elif ":" in prompt:
lowerCamelCase__ : Union[str, Any] = prompt.split(":" )
lowerCamelCase__ : Optional[Any] = float(__lowerCamelCase )
else:
lowerCamelCase__ : Optional[int] = prompt
lowerCamelCase__ : Optional[Any] = 1.0
processed_prompts.append(__lowerCamelCase )
weights.append(__lowerCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__lowerCamelCase , device=self.device ),
}
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Any=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Any=None , ):
'''simple docstring'''
if image_path:
lowerCamelCase__ : Dict = self._get_latent(__lowerCamelCase )
else:
lowerCamelCase__ : Tuple = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
lowerCamelCase__ : Union[str, Any] = self.process_prompts(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = self.process_prompts(__lowerCamelCase )
if save_final and save_path is None:
lowerCamelCase__ : Optional[int] = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
else:
lowerCamelCase__ : Optional[int] = save_path + '_' + get_timestamp()
os.makedirs(__lowerCamelCase )
lowerCamelCase__ : Tuple = save_path
lowerCamelCase__ : str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(__lowerCamelCase ) )
lowerCamelCase__ : Tuple = loop_post_process(__lowerCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) ):
if show_intermediate:
show_pil(__lowerCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"Image": wandb.Image(__lowerCamelCase )} )
if show_final:
show_pil(__lowerCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}_final.png" ) )
| 702 |
import os
from pathlib import Path
def lowercase_ ( ):
"""simple docstring"""
from torch.utils.cpp_extension import load
lowerCamelCase__ : Any = Path(_A ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
lowerCamelCase__ : Optional[int] = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , _A , with_cuda=_A , extra_include_paths=[str(_A )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
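# torch.utils.cpp_extension.load compiles the listed sources just-in-time on the
# first call and caches the build, so a working C++/CUDA toolchain is assumed;
# the returned module exposes the fused multi-scale deformable attention op.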
| 5 | 0 |
import numpy as np
A : Union[str, Any] = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class _lowercase :
"""simple docstring"""
def __init__( self : str ):
'''simple docstring'''
        lowerCamelCase__ : Any = np.array(A )
def lowerCAmelCase ( self : Any , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = np.where(letter == self.SQUARE )
lowerCamelCase__ : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : int = message.lower()
lowerCamelCase__ : Tuple = message.replace(" " , "" )
lowerCamelCase__ : int = message.replace("j" , "i" )
lowerCamelCase__ : Any = np.empty((2, len(__lowerCamelCase )) )
for letter_index in range(len(__lowerCamelCase ) ):
lowerCamelCase__ : Optional[int] = self.letter_to_numbers(message[letter_index] )
lowerCamelCase__ : Union[str, Any] = numbers[0]
lowerCamelCase__ : Union[str, Any] = numbers[1]
lowerCamelCase__ : Optional[int] = first_step.reshape(2 * len(__lowerCamelCase ) )
lowerCamelCase__ : int = ''
for numbers_index in range(len(__lowerCamelCase ) ):
lowerCamelCase__ : str = int(second_step[numbers_index * 2] )
lowerCamelCase__ : str = int(second_step[(numbers_index * 2) + 1] )
lowerCamelCase__ : Tuple = self.numbers_to_letter(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Tuple = encoded_message + letter
return encoded_message
def lowerCAmelCase ( self : int , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = message.lower()
message.replace(" " , "" )
lowerCamelCase__ : Tuple = np.empty(2 * len(__lowerCamelCase ) )
for letter_index in range(len(__lowerCamelCase ) ):
lowerCamelCase__ : Optional[Any] = self.letter_to_numbers(message[letter_index] )
lowerCamelCase__ : Optional[int] = numbers[0]
lowerCamelCase__ : Dict = numbers[1]
lowerCamelCase__ : Optional[int] = first_step.reshape((2, len(__lowerCamelCase )) )
lowerCamelCase__ : List[str] = ''
for numbers_index in range(len(__lowerCamelCase ) ):
lowerCamelCase__ : List[Any] = int(second_step[0, numbers_index] )
lowerCamelCase__ : Optional[int] = int(second_step[1, numbers_index] )
lowerCamelCase__ : Tuple = self.numbers_to_letter(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : str = decoded_message + letter
return decoded_message
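# Intended round-trip, per the Polybius-square cipher this file obfuscates:
# encode() lowercases, strips spaces and folds "j" into "i", so
# decode(encode("test message")) yields "testmessage".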
| 703 |
import os
from datetime import datetime as dt
from github import Github
A : Union[str, Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = Github(os.environ["GITHUB_TOKEN"] )
lowerCamelCase__ : str = g.get_repo("huggingface/diffusers" )
lowerCamelCase__ : Optional[int] = repo.get_issues(state="open" )
for issue in open_issues:
lowerCamelCase__ : str = sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A )
lowerCamelCase__ : str = comments[0] if len(_A ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
    lowercase_()
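# The script expects a GITHUB_TOKEN able to edit issues in the environment and
# is meant to run on a schedule (e.g. a daily CI cron): stale notices go out
# after 23 quiet days, and stale issues close 7 days after the notice unless a
# human replies.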
| 5 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
A : List[Any] = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
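# The _LazyModule indirection defers importing the torch-backed modules until
# one of the listed symbols is first accessed; the TYPE_CHECKING branch exists
# only so static analyzers can resolve the same names eagerly.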
| 704 |
from __future__ import annotations
def lowercase_ ( _A : str , _A : list[str] | None = None , _A : dict[str, float] | None = None , _A : bool = False , ):
"""simple docstring"""
lowerCamelCase__ : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCamelCase__ : Dict = {
"a": 0.08_497,
"b": 0.01_492,
"c": 0.02_202,
"d": 0.04_253,
"e": 0.11_162,
"f": 0.02_228,
"g": 0.02_015,
"h": 0.06_094,
"i": 0.07_546,
"j": 0.00_153,
"k": 0.01_292,
"l": 0.04_025,
"m": 0.02_406,
"n": 0.06_749,
"o": 0.07_507,
"p": 0.01_929,
"q": 0.00_095,
"r": 0.07_587,
"s": 0.06_327,
"t": 0.09_356,
"u": 0.02_758,
"v": 0.00_978,
"w": 0.02_560,
"x": 0.00_150,
"y": 0.01_994,
"z": 0.00_077,
}
else:
# Custom frequencies dictionary
lowerCamelCase__ : Optional[int] = frequencies_dict
if not case_sensitive:
lowerCamelCase__ : str = ciphertext.lower()
# Chi squared statistic values
lowerCamelCase__ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(_A ) ):
lowerCamelCase__ : Optional[Any] = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCamelCase__ : Dict = (alphabet_letters.index(letter.lower() ) - shift) % len(
_A )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCamelCase__ : str = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCamelCase__ : List[str] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase__ : List[str] = decrypted_with_shift.lower().count(_A )
                # Get the expected amount of times the letter should appear based
# on letter frequencies
lowerCamelCase__ : List[Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ : str = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase__ : Any = decrypted_with_shift.count(_A )
                # Get the expected amount of times the letter should appear based
# on letter frequencies
lowerCamelCase__ : str = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ : int = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCamelCase__ : Optional[int] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(_A : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
lowerCamelCase__ : int = min(
_A , key=_A , )
# Get all the data from the most likely cipher (key, decoded message)
    lowerCamelCase__ , lowerCamelCase__ : int = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
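# Usage sketch (the ciphertext is a made-up example): the helper returns the
# most likely shift, its chi-squared score, and the decoded text, and should
# recover a shift of 3 for this Caesar-shifted English phrase:
#   shift, score, plaintext = lowercase_("vwdwlvwlfv duh ixq")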
| 5 | 0 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
A : List[Any] = 5
A : Optional[int] = 10
@require_sentencepiece
@require_tokenizers
class _lowercase ( __a , unittest.TestCase):
"""simple docstring"""
A__ = SpeechaTextTokenizer
A__ = False
A__ = True
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : List[Any] = sp.SentencePieceProcessor()
spm_model.Load(snake_case__ )
lowerCamelCase__ : Any = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(snake_case__ ) )]
lowerCamelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCamelCase__ : Union[str, Any] = Path(self.tmpdirname )
save_json(snake_case__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCamelCase__ : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : int = "<pad>"
lowerCamelCase__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(snake_case__ ) , 1001 )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [289, 50, 14, 174, 386] , )
lowerCamelCase__ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCamelCase__ : Dict = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(snake_case__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCamelCase__ : str = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : str = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class _lowercase ( unittest.TestCase):
"""simple docstring"""
A__ = '''valhalla/s2t_mustc_multilinguial_medium'''
A__ = '''C\'est trop cool'''
A__ = '''Esto es genial'''
@classmethod
def lowerCAmelCase ( cls : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
lowerCamelCase__ : Dict = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCamelCase__ : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
lowerCamelCase__ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Any = "fr"
lowerCamelCase__ : Any = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , snake_case__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : str = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCamelCase__ : Optional[int] = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
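    # In the unobfuscated test the bare string assignments above set
    # tokenizer.tgt_lang; the multilingual checkpoint then prepends the matching
    # language-id token (FR_CODE / ES_CODE) to every encoded sequence.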
| 705 |
def lowercase_ ( _A : int ):
"""simple docstring"""
if not isinstance(_A , _A ):
lowerCamelCase__ : List[str] = F"Input value of [number={number}] must be an integer"
raise TypeError(_A )
if number < 0:
return False
lowerCamelCase__ : Dict = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
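# Automorphic numbers end in themselves when squared: 5**2 = 25 and 76**2 = 5776,
# so lowercase_(5) and lowercase_(76) return True, while lowercase_(7) returns
# False because 49 does not end in 7.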
| 5 | 0 |
import os
from distutils.util import strtobool
def lowercase_ ( _A : Any , _A : List[str] ):
"""simple docstring"""
for e in env_keys:
lowerCamelCase__ : Any = int(os.environ.get(lowerCAmelCase_ , -1 ) )
if val >= 0:
return val
return default
def lowercase_ ( _A : Optional[Any] , _A : Union[str, Any]=False ):
"""simple docstring"""
lowerCamelCase__ : List[str] = os.environ.get(lowerCAmelCase_ , str(lowerCAmelCase_ ) )
return strtobool(lowerCAmelCase_ ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowercase_ ( _A : List[str] , _A : Tuple="no" ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = os.environ.get(lowerCAmelCase_ , str(lowerCAmelCase_ ) )
return value
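# Usage sketch; the helper names below are hypothetical stand-ins, since the
# three defs above share one obfuscated name and shadow each other at import:
#   world_size = get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)
#   debug = parse_flag_from_env("DEBUG_MODE", default=False)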
| 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
A : Optional[int] = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : int=0.2 , __lowerCamelCase : Dict=0.2 ):
'''simple docstring'''
lowerCamelCase__ : int = bp_numa
lowerCamelCase__ : str = bp_numa
lowerCamelCase__ : List[Any] = bp_numa
lowerCamelCase__ : int = conva_get[:2]
lowerCamelCase__ : Dict = conva_get[2]
lowerCamelCase__ : List[str] = size_pa
lowerCamelCase__ : List[str] = rate_w
lowerCamelCase__ : List[str] = rate_t
lowerCamelCase__ : Tuple = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCamelCase__ : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase__ : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase__ : List[Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowerCamelCase__ : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
lowerCamelCase__ : Any = -2 * np.random.rand(self.num_bpa ) + 1
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Any = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb" ) as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase )
print(f"Model saved: {save_path}" )
@classmethod
def lowerCAmelCase ( cls : List[Any] , __lowerCamelCase : Any ):
'''simple docstring'''
with open(__lowerCamelCase , "rb" ) as f:
lowerCamelCase__ : Any = pickle.load(__lowerCamelCase ) # noqa: S301
lowerCamelCase__ : Dict = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
lowerCamelCase__ : str = model_dic.get("size_pooling1" )
lowerCamelCase__ : str = model_dic.get("num_bp1" )
lowerCamelCase__ : List[str] = model_dic.get("num_bp2" )
lowerCamelCase__ : int = model_dic.get("num_bp3" )
lowerCamelCase__ : Dict = model_dic.get("rate_weight" )
lowerCamelCase__ : int = model_dic.get("rate_thre" )
# create model instance
        lowerCamelCase__ : Union[str, Any] = cls(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# modify model parameter
lowerCamelCase__ : Optional[int] = model_dic.get("w_conv1" )
lowerCamelCase__ : Union[str, Any] = model_dic.get("wkj" )
lowerCamelCase__ : Union[str, Any] = model_dic.get("vji" )
lowerCamelCase__ : Optional[Any] = model_dic.get("thre_conv1" )
lowerCamelCase__ : str = model_dic.get("thre_bp2" )
lowerCamelCase__ : int = model_dic.get("thre_bp3" )
return conv_ins
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return round(__lowerCamelCase , 3 )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : int = convs[0]
lowerCamelCase__ : Any = convs[1]
lowerCamelCase__ : str = np.shape(__lowerCamelCase )[0]
# get the data slice of original image data, data_focus
lowerCamelCase__ : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase ):
lowerCamelCase__ : Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
lowerCamelCase__ : str = []
lowerCamelCase__ : int = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__lowerCamelCase ):
lowerCamelCase__ : int = []
for i_focus in range(len(__lowerCamelCase ) ):
lowerCamelCase__ : List[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase ) )
lowerCamelCase__ : Any = np.asmatrix(__lowerCamelCase ).reshape(
__lowerCamelCase , __lowerCamelCase )
data_featuremap.append(__lowerCamelCase )
        # expanding the data slice to one dimension
lowerCamelCase__ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase ) )
lowerCamelCase__ : List[str] = np.asarray(__lowerCamelCase )
return focus_list, data_featuremap
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Any="average_pool" ):
'''simple docstring'''
lowerCamelCase__ : List[str] = len(featuremaps[0] )
lowerCamelCase__ : Optional[int] = int(size_map / size_pooling )
lowerCamelCase__ : Dict = []
for i_map in range(len(__lowerCamelCase ) ):
lowerCamelCase__ : Optional[int] = featuremaps[i_map]
lowerCamelCase__ : Dict = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase ):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : int = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase ) )
lowerCamelCase__ : List[Any] = np.asmatrix(__lowerCamelCase ).reshape(__lowerCamelCase , __lowerCamelCase )
featuremap_pooled.append(__lowerCamelCase )
return featuremap_pooled
def lowerCAmelCase ( self : Dict , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = []
for i in range(len(__lowerCamelCase ) ):
lowerCamelCase__ : List[str] = np.shape(data[i] )
lowerCamelCase__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCamelCase__ : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = np.asarray(__lowerCamelCase )
return data_expanded
def lowerCAmelCase ( self : int , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : int = np.asarray(__lowerCamelCase )
lowerCamelCase__ : Any = np.shape(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowerCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Optional[Any] = 0
for i_map in range(__lowerCamelCase ):
lowerCamelCase__ : Tuple = np.ones((size_map, size_map) )
for i in range(0 , __lowerCamelCase , __lowerCamelCase ):
for j in range(0 , __lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : Optional[int] = pd_pool[
i_pool
]
lowerCamelCase__ : Optional[Any] = i_pool + 1
lowerCamelCase__ : List[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__lowerCamelCase )
return pd_all
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[int]=bool ):
'''simple docstring'''
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase )) )
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase )) )
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : Optional[Any] = 10000
while rp < n_repeat and mse >= error_accuracy:
lowerCamelCase__ : Optional[int] = 0
print(f"-------------Learning Time {rp}--------------" )
for p in range(len(__lowerCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCamelCase__ : Dict = np.asmatrix(datas_train[p] )
lowerCamelCase__ : List[Any] = np.asarray(datas_teach[p] )
lowerCamelCase__ , lowerCamelCase__ : str = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase__ : int = self.pooling(__lowerCamelCase , self.size_poolinga )
lowerCamelCase__ : Union[str, Any] = np.shape(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = self._expand(__lowerCamelCase )
lowerCamelCase__ : str = data_bp_input
lowerCamelCase__ : str = np.dot(__lowerCamelCase , self.vji.T ) - self.thre_bpa
lowerCamelCase__ : int = self.sig(__lowerCamelCase )
lowerCamelCase__ : Any = np.dot(__lowerCamelCase , self.wkj.T ) - self.thre_bpa
lowerCamelCase__ : Any = self.sig(__lowerCamelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCamelCase__ : Dict = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa) ) )
lowerCamelCase__ : str = np.multiply(
np.dot(__lowerCamelCase , self.wkj ) , np.multiply(__lowerCamelCase , (1 - bp_outa) ) )
lowerCamelCase__ : Tuple = np.dot(__lowerCamelCase , self.vji )
lowerCamelCase__ : Optional[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCamelCase__ : List[str] = pd_conva_pooled.T.getA().tolist()
lowerCamelCase__ : Any = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCamelCase__ : Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
lowerCamelCase__ : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Dict = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCamelCase__ : int = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCamelCase__ : str = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCamelCase__ : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCamelCase__ : Dict = self.thre_bpa - pd_k_all * self.rate_thre
lowerCamelCase__ : str = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCamelCase__ : Tuple = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCamelCase__ : Optional[Any] = rp + 1
lowerCamelCase__ : Dict = error_count / patterns
all_mse.append(__lowerCamelCase )
def draw_error():
lowerCamelCase__ : int = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__lowerCamelCase , "+-" )
plt.plot(__lowerCamelCase , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(__lowerCamelCase , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def lowerCAmelCase ( self : str , __lowerCamelCase : Any ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase )) )
for p in range(len(__lowerCamelCase ) ):
lowerCamelCase__ : Union[str, Any] = np.asmatrix(datas_test[p] )
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase__ : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga )
lowerCamelCase__ : Optional[Any] = self._expand(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = data_bp_input
lowerCamelCase__ : Tuple = bp_outa * self.vji.T - self.thre_bpa
lowerCamelCase__ : List[str] = self.sig(__lowerCamelCase )
lowerCamelCase__ : Any = bp_outa * self.wkj.T - self.thre_bpa
lowerCamelCase__ : Optional[int] = self.sig(__lowerCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowerCamelCase__ : List[str] = [list(map(self.do_round , __lowerCamelCase ) ) for each in produce_out]
return np.asarray(__lowerCamelCase )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : str = np.asmatrix(__lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase__ : Tuple = self.pooling(__lowerCamelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
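# Orientation notes for the class above: convolute() returns the flattened input
# slices plus one feature map per kernel, pooling() shrinks each map by the
# pooling window, _expand() flattens the pooled maps into the BP input vector,
# and train() loops until n_repeat epochs elapse or the mean squared error falls
# below error_accuracy, optionally plotting the error curve via draw_error().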
| 707 |
from __future__ import annotations
import time
import numpy as np
A : Dict = [8, 5, 9, 7]
A : Optional[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A : Any = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class _lowercase :
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ):
'''simple docstring'''
lowerCamelCase__ : int = claim_vector
lowerCamelCase__ : str = allocated_resources_table
lowerCamelCase__ : int = maximum_claim_table
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
        return {self.__need().index(i ): i for i in self.__need()}
def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.__need()
lowerCamelCase__ : str = self.__allocated_resources_table
lowerCamelCase__ : List[Any] = self.__available_resources()
lowerCamelCase__ : str = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
lowerCamelCase__ : int = False
for each_need in need_list:
lowerCamelCase__ : Dict = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
lowerCamelCase__ : str = False
break
if execution:
lowerCamelCase__ : Tuple = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
lowerCamelCase__ : Any = original_need_index
print(f"Process {process_number + 1} is executing." )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"
+ " ".join(f"{it:>8}" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"
+ " ".join(f"{it:>8}" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
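# A compact sketch of the safety test the class above performs, on the same
# sample data (re-declared locally, since the module-level names are opaque):
# a process may run once its remaining need fits within the available vector,
# after which its allocation is released back.
import numpy as np

claim = np.array([8, 5, 9, 7])
alloc = np.array([[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]])
maximum = np.array([[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]])

available = claim - alloc.sum(axis=0)   # resources not currently held
need = maximum - alloc                  # what each process may still request
pending = list(range(len(alloc)))
while pending:
    runnable = [p for p in pending if np.all(need[p] <= available)]
    if not runnable:
        print("System in unsafe state. Aborting...")
        break
    available = available + alloc[runnable[0]]  # process finishes, frees its allocation
    pending.remove(runnable[0])
else:
    print("The process is in a safe state.")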
# Imports
import numpy as np
class _lowercase :
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : int=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple=None ):
'''simple docstring'''
if red is not None:
lowerCamelCase__ : Dict = red
if green is not None:
lowerCamelCase__ : Any = green
if blue is not None:
lowerCamelCase__ : Union[str, Any] = blue
if red_edge is not None:
lowerCamelCase__ : Tuple = red_edge
if nir is not None:
lowerCamelCase__ : Any = nir
return True
def lowerCAmelCase ( self : int , __lowerCamelCase : Tuple="" , __lowerCamelCase : List[Any]=None , __lowerCamelCase : int=None , __lowerCamelCase : Any=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=None ):
'''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
lowerCamelCase__ : int = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def lowerCAmelCase ( self : str , __lowerCamelCase : int=0.0_8 , __lowerCamelCase : Any=1.2_2 , __lowerCamelCase : Union[str, Any]=0.0_3 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return (self.nir / self.green) - 1
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return (self.red - self.blue) / self.red
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return self.nir - self.green
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def lowerCAmelCase ( self : Any , __lowerCamelCase : List[str]=0.1_6 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any]=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Any=None , __lowerCamelCase : Any=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return self.nir / self.red
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase__ : str = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return self.nir / self.red
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 708 |
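# Usage sketch for the index calculator above: NDVI on two synthetic bands.
# The band values are made up; real inputs would be reflectance rasters.
import numpy as np

nir = np.array([[0.70, 0.65], [0.80, 0.75]])
red = np.array([[0.10, 0.20], [0.05, 0.15]])
ndvi = (nir - red) / (nir + red)  # the same formula the ndvi() method encodes
print(ndvi)  # values near +1 suggest dense, healthy vegetation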
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = BarthezTokenizer
A__ = BarthezTokenizerFast
A__ = True
A__ = True
def lowerCAmelCase ( self : int ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase )
lowerCamelCase__ : Tuple = tokenizer
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Any = "<pad>"
lowerCamelCase__ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__lowerCamelCase ) , 101122 )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2]
lowerCamelCase__ : Tuple = self.tokenizer(
__lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
lowerCamelCase__ : Any = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : Tuple = self.get_rust_tokenizer()
lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé."
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : List[str] = self.get_rust_tokenizer()
lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase )
lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
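        # fmt: off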
lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowerCamelCase__ : List[str] = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
| 5 | 0 |
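# Hedged usage sketch for the tokenizer exercised in the test above; it needs
# network access to fetch "moussaKam/mbarthez" from the Hugging Face Hub.
from transformers import BarthezTokenizerFast

tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
batch = tokenizer(["Le transformeur est un modèle."], padding=True, return_tensors="pt")
print(batch.input_ids.shape)  # (batch size, padded sequence length)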
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
A : Tuple = True
except (ImportError, AttributeError):
A : Union[str, Any] = object
def lowercase_ ( *_A : List[Any] , **_A : Any ):
"""simple docstring"""
pass
A : int = False
A : List[Any] = logging.get_logger("transformers-cli/serving")
def lowercase_ ( _A : int ):
"""simple docstring"""
lowerCamelCase__ : Tuple = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers )
class _lowercase ( _A):
A__ = 42
class _lowercase ( _A):
A__ = 42
A__ = 42
class _lowercase ( _A):
A__ = 42
class _lowercase ( _A):
A__ = 42
class _lowercase ( _A):
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : ArgumentParser ):
'''simple docstring'''
lowerCamelCase__ : List[str] = parser.add_parser(
"serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints." )
serve_parser.add_argument(
"--task" , type=UpperCamelCase__ , choices=get_supported_tasks() , help="The task to run the pipeline on" , )
serve_parser.add_argument("--host" , type=UpperCamelCase__ , default="localhost" , help="Interface the server will listen on." )
serve_parser.add_argument("--port" , type=UpperCamelCase__ , default=8888 , help="Port the serving will listen to." )
serve_parser.add_argument("--workers" , type=UpperCamelCase__ , default=1 , help="Number of http workers" )
serve_parser.add_argument("--model" , type=UpperCamelCase__ , help="Model's name or path to stored model." )
serve_parser.add_argument("--config" , type=UpperCamelCase__ , help="Model's config name or path to stored model." )
serve_parser.add_argument("--tokenizer" , type=UpperCamelCase__ , help="Tokenizer name to use." )
serve_parser.add_argument(
"--device" , type=UpperCamelCase__ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
serve_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self : Optional[Any] , __lowerCamelCase : Pipeline , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = pipeline
lowerCamelCase__ : List[Any] = host
lowerCamelCase__ : Union[str, Any] = port
lowerCamelCase__ : int = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(f"Serving model over {host}:{port}" )
lowerCamelCase__ : Optional[int] = FastAPI(
routes=[
APIRoute(
"/" , self.model_info , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=["GET"] , ),
APIRoute(
"/tokenize" , self.tokenize , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=["POST"] , ),
APIRoute(
"/detokenize" , self.detokenize , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=["POST"] , ),
APIRoute(
"/forward" , self.forward , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=["POST"] , ),
] , timeout=600 , )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
run(self._app , host=self.host , port=self.port , workers=self.workers )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : str = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , __lowerCamelCase : bool = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) ):
'''simple docstring'''
try:
lowerCamelCase__ : Any = self._pipeline.tokenizer.tokenize(UpperCamelCase__ )
if return_ids:
lowerCamelCase__ : Union[str, Any] = self._pipeline.tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
return ServeTokenizeResult(tokens=UpperCamelCase__ , tokens_ids=UpperCamelCase__ )
else:
return ServeTokenizeResult(tokens=UpperCamelCase__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={"model": "", "error": str(UpperCamelCase__ )} )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , __lowerCamelCase : bool = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , __lowerCamelCase : bool = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , ):
'''simple docstring'''
try:
lowerCamelCase__ : Dict = self._pipeline.tokenizer.decode(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return ServeDeTokenizeResult(model="" , text=UpperCamelCase__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={"model": "", "error": str(UpperCamelCase__ )} )
async def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any]=Body(UpperCamelCase__ , embed=UpperCamelCase__ ) ):
'''simple docstring'''
if len(UpperCamelCase__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
lowerCamelCase__ : Any = self._pipeline(UpperCamelCase__ )
return ServeForwardResult(output=UpperCamelCase__ )
except Exception as e:
raise HTTPException(500 , {"error": str(UpperCamelCase__ )} )
| 709 |
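# A stripped-down FastAPI sketch of the /forward route registered above; run it
# with `uvicorn module_name:app`. The task and default model are illustrative,
# and the pipeline weights are downloaded on first use.
from fastapi import Body, FastAPI
from transformers import pipeline

app = FastAPI()
nlp = pipeline("sentiment-analysis")

@app.post("/forward")
def forward(inputs: str = Body(..., embed=True)):
    return {"output": nlp(inputs)}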
import cva
import numpy as np
class _lowercase :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : float , __lowerCamelCase : int ):
'''simple docstring'''
if k in (0.0_4, 0.0_6):
lowerCamelCase__ : int = k
lowerCamelCase__ : List[str] = window_size
else:
raise ValueError("invalid k value" )
def __str__( self : str ):
'''simple docstring'''
return str(self.k )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : str ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(__lowerCamelCase , 0 )
lowerCamelCase__ , lowerCamelCase__ : Any = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : List[Any] = img.copy()
lowerCamelCase__ : int = cva.cvtColor(__lowerCamelCase , cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : int = np.gradient(__lowerCamelCase )
lowerCamelCase__ : Dict = dx**2
lowerCamelCase__ : Optional[Any] = dy**2
lowerCamelCase__ : int = dx * dy
lowerCamelCase__ : Union[str, Any] = 0.0_4
lowerCamelCase__ : Any = self.window_size // 2
for y in range(__lowerCamelCase , h - offset ):
for x in range(__lowerCamelCase , w - offset ):
lowerCamelCase__ : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : List[str] = wxx + wyy
lowerCamelCase__ : List[Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
A : Tuple = HarrisCorner(0.0_4, 3)
A, A : Optional[int] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 5 | 0 |
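# The core of the detector above, vectorised: per-pixel structure-tensor sums
# via a box filter, then R = det(M) - k * trace(M)^2. SciPy's uniform_filter
# stands in for the windowed sums done by the double loop (an extra dependency).
import numpy as np
from scipy.ndimage import uniform_filter

def harris_response(img: np.ndarray, k: float = 0.04, window: int = 3) -> np.ndarray:
    dy, dx = np.gradient(img.astype(float))  # gradients along rows, then columns
    wxx = uniform_filter(dx * dx, size=window)
    wyy = uniform_filter(dy * dy, size=window)
    wxy = uniform_filter(dx * dy, size=window)
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2  # large positive R marks a corner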
A : Any = '''Input must be a string of 8 numbers plus letter'''
A : Optional[Any] = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def lowercase_ ( _A : Tuple ):
"""simple docstring"""
if not isinstance(_A , _A ):
lowerCamelCase__ : List[str] = F"Expected string as input, found {type(_A ).__name__}"
raise TypeError(_A )
lowerCamelCase__ : List[Any] = spanish_id.replace("-" , "" ).upper()
if len(_A ) != 9:
raise ValueError(_A )
try:
lowerCamelCase__ : Any = int(spanish_id_clean[0:8] )
lowerCamelCase__ : Tuple = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_A ) from ex
if letter.isdigit():
raise ValueError(_A )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
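# Quick sanity check for the validator above: 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so "12345678Z" carries a valid control letter.
assert "TRWAGMYFPDXBNJZSQVHLCKE"[12345678 % 23] == "Z"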
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowercase :
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Any = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : int = use_input_mask
lowerCamelCase__ : List[str] = use_token_type_ids
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : List[Any] = embedding_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_hidden_groups
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : Optional[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : str = num_labels
lowerCamelCase__ : List[Any] = num_choices
lowerCamelCase__ : Any = scope
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[int] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : int = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Union[str, Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : str = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_labels
lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_choices
lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : int = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : int = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Union[str, Any] = config_and_inputs
lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ):
'''simple docstring'''
lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase )
lowerCamelCase__ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = AlbertModelTester(self )
lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ : Dict = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" )
lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
lowerCamelCase__ : Dict = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
| 5 | 0 |
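# A tiny randomly-initialised ALBERT forward pass mirroring the shape checks in
# the tests above, without downloading a checkpoint; all config values here are
# arbitrary small numbers chosen only to keep the model light.
import torch
from transformers import AlbertConfig, AlbertModel

config = AlbertConfig(
    vocab_size=100, embedding_size=16, hidden_size=36,
    num_hidden_layers=2, num_attention_heads=6, intermediate_size=37,
)
model = AlbertModel(config).eval()
input_ids = torch.randint(0, 100, (1, 11))
with torch.no_grad():
    output = model(input_ids)
print(output.last_hidden_state.shape)  # torch.Size([1, 11, 36])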
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
A : List[str] = False
try:
A : List[Any] = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class _lowercase :
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Optional[Any] = None , __lowerCamelCase : Dict = [] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : Any = choices
lowerCamelCase__ : int = prompt
if sys.platform == "win32":
lowerCamelCase__ : str = "*"
else:
lowerCamelCase__ : Optional[Any] = "➔ "
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] = "" ):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __a )
else:
forceWrite(self.choices[index] , __a )
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Dict ):
'''simple docstring'''
if index == self.position:
forceWrite(f" {self.arrow_char} " )
self.write_choice(__a )
else:
forceWrite(f" {self.choices[index]}" )
reset_cursor()
def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : List[str] = 1 ):
'''simple docstring'''
lowerCamelCase__ : str = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a )
move_cursor(__a , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__a )] for number in range(10 )] )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Any = int(chr(self.current_selection ) )
lowerCamelCase__ : int = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __a )
else:
return
else:
return
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : List[str] = 0 ):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
lowerCamelCase__ : str = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__a )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
lowerCamelCase__ : Optional[int] = int(builtins.input() )
except ValueError:
lowerCamelCase__ : Optional[int] = default_choice
else:
lowerCamelCase__ : Dict = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__a , "\n" )
return choice
| 711 |
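# A non-interactive fallback in the spirit of the Colab branch of run() above:
# plain numeric selection via input(), with the same default-on-bad-input rule.
def simple_menu(prompt: str, choices: list[str], default: int = 0) -> int:
    print(prompt)
    for i, choice in enumerate(choices):
        print(f"  [{i}] {choice}")
    try:
        index = int(input("Choice index: "))
    except ValueError:
        index = default
    return index if 0 <= index < len(choices) else default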
import os
def lowercase_ ( _A : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file:
lowerCamelCase__ : List[Any] = [
[int(_A ) for element in line.split("," )]
for line in input_file.readlines()
]
lowerCamelCase__ : Optional[Any] = len(_A )
lowerCamelCase__ : Union[str, Any] = len(matrix[0] )
lowerCamelCase__ : Union[str, Any] = [[-1 for _ in range(_A )] for _ in range(_A )]
for i in range(_A ):
lowerCamelCase__ : Optional[Any] = matrix[i][0]
for j in range(1 , _A ):
for i in range(_A ):
lowerCamelCase__ : int = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _A ):
lowerCamelCase__ : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowerCamelCase__ : str = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'{solution() = }')
| 5 | 0 |
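# The same column-by-column dynamic programme as above, checked against the 5x5
# example matrix from Project Euler problem 82, whose stated minimum is 994.
def min_path_sum(matrix: list[list[int]]) -> int:
    rows = len(matrix)
    best = [row[0] for row in matrix]                              # left column
    for j in range(1, len(matrix[0])):
        best = [best[i] + matrix[i][j] for i in range(rows)]       # step right
        for i in range(1, rows):                                   # sweep down
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):                          # sweep up
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)

EXAMPLE = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
assert min_path_sum(EXAMPLE) == 994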
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : Tuple = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
lowerCamelCase__ : Optional[Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
lowerCamelCase__ : str = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
lowerCamelCase__ : Any = {"""unk_token""": """<unk>"""}
lowerCamelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase_ ) )
lowerCamelCase__ : str = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , lowerCamelCase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase ( self : Optional[int] , **__lowerCamelCase : str ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCAmelCase ( self : Any , **__lowerCamelCase : Optional[int] ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizer()
lowerCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase__ : str = self.get_image_processor()
lowerCamelCase__ : Optional[int] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase_ )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase__ : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
lowerCamelCase__ : Optional[Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = self.prepare_image_inputs()
lowerCamelCase__ : Any = image_processor(lowerCamelCase_ , return_tensors="np" )
lowerCamelCase__ : Union[str, Any] = processor(images=lowerCamelCase_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.get_image_processor()
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : int = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
lowerCamelCase__ : Any = """lower newer"""
lowerCamelCase__ : Dict = processor(text=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : Tuple = self.get_tokenizer()
lowerCamelCase__ : Tuple = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = """lower newer"""
lowerCamelCase__ : Dict = self.prepare_image_inputs()
lowerCamelCase__ : Dict = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Tuple = self.get_tokenizer()
lowerCamelCase__ : List[str] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Optional[Any] = processor.batch_decode(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Optional[int] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
lowerCamelCase__ : Tuple = """lower newer"""
lowerCamelCase__ : Any = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 712 |
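# Hedged end-to-end sketch of the processor tested above; it downloads the
# public "openai/clip-vit-base-patch32" checkpoint, so it needs network access.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo"], images=image, padding=True, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']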
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _lowercase ( datasets.Metric):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ):
'''simple docstring'''
lowerCamelCase__ : str = compute_bleu(
reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase )
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 5 | 0 |
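# A minimal clipped-unigram precision with brevity penalty, i.e. roughly what
# compute_bleu does for max_order=1; a sketch, not the full geometric-mean BLEU.
import math
from collections import Counter

def bleu_1(prediction: list[str], references: list[list[str]]) -> float:
    pred_counts = Counter(prediction)
    max_ref_counts: Counter = Counter()
    for ref in references:
        for token, count in Counter(ref).items():
            max_ref_counts[token] = max(max_ref_counts[token], count)
    clipped = sum(min(c, max_ref_counts[t]) for t, c in pred_counts.items())
    precision = clipped / len(prediction)
    ref_len = min((len(r) for r in references), key=lambda L: (abs(L - len(prediction)), L))
    bp = 1.0 if len(prediction) > ref_len else math.exp(1 - ref_len / len(prediction))
    return bp * precision

print(bleu_1(["hello", "there", "general", "kenobi"],
             [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]]))  # 1.0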
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowercase :
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[int]=32 * 4 , __lowerCamelCase : Optional[Any]=32 * 6 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : int=32 , ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : int = is_training
lowerCamelCase__ : Tuple = use_auxiliary_loss
lowerCamelCase__ : Tuple = num_queries
lowerCamelCase__ : Tuple = num_channels
lowerCamelCase__ : List[Any] = min_size
lowerCamelCase__ : int = max_size
lowerCamelCase__ : int = num_labels
lowerCamelCase__ : Optional[int] = mask_feature_size
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCamelCase_ )
lowerCamelCase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase_ )
lowerCamelCase__ : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase_ ) > 0.5
).float()
lowerCamelCase__ : int = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase_ ) > 0.5).long()
lowerCamelCase__ : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = output.encoder_hidden_states
lowerCamelCase__ : Union[str, Any] = output.pixel_decoder_hidden_states
lowerCamelCase__ : List[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase_ ) , config.decoder_config.decoder_layers )
def lowerCAmelCase ( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=False ):
'''simple docstring'''
with torch.no_grad():
lowerCamelCase__ : Dict = MaskFormerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCamelCase__ : List[str] = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
lowerCamelCase__ : List[str] = model(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MaskFormerForInstanceSegmentation(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
def comm_check_on_output(__lowerCamelCase : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
lowerCamelCase__ : Any = model(UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
lowerCamelCase__ : Any = model(
pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
"""simple docstring"""
A__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A__ = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Tuple = MaskFormerModelTester(self )
lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase_ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] = model_class(UpperCamelCase_ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[str] = [*signature.parameters.keys()]
lowerCamelCase__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
@slow
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCamelCase__ : str = MaskFormerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = (self.model_tester.min_size,) * 2
lowerCamelCase__ : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=UpperCamelCase_ ),
"mask_labels": torch.randn((2, 10, *size) , device=UpperCamelCase_ ),
"class_labels": torch.zeros(2 , 10 , device=UpperCamelCase_ ).long(),
}
lowerCamelCase__ : Any = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase_ )
lowerCamelCase__ : Dict = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(UpperCamelCase_ ).to(UpperCamelCase_ )
lowerCamelCase__ : Union[str, Any] = model(**UpperCamelCase_ , output_attentions=UpperCamelCase_ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCamelCase__ : List[Any] = self.all_model_classes[1]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
lowerCamelCase__ : Dict = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.all_model_classes[1]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : str = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : List[str] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
lowerCamelCase__ : List[Any] = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCamelCase__ : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowerCamelCase__ : Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCamelCase__ : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
A : List[str] = 1E-4
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : str = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(UpperCamelCase_ )
lowerCamelCase__ : Any = self.default_image_processor
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : str = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
lowerCamelCase__ : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**UpperCamelCase_ )
lowerCamelCase__ : Any = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
lowerCamelCase__ : str = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
lowerCamelCase__ : Tuple = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Any = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(UpperCamelCase_ )
.eval()
)
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
lowerCamelCase__ : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**UpperCamelCase_ )
# masks_queries_logits
lowerCamelCase__ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase__ : Optional[int] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
lowerCamelCase__ : Optional[int] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
lowerCamelCase__ : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase__ : int = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(UpperCamelCase_ )
.eval()
)
lowerCamelCase__ : Union[str, Any] = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
lowerCamelCase__ : Tuple = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase_ )
# masks_queries_logits
lowerCamelCase__ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase__ : List[Any] = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
lowerCamelCase__ : Union[str, Any] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
lowerCamelCase__ : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase__ : Optional[Any] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(UpperCamelCase_ )
.eval()
)
lowerCamelCase__ : Optional[int] = self.default_image_processor
lowerCamelCase__ : Optional[Any] = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="pt" , )
lowerCamelCase__ : List[str] = inputs["pixel_values"].to(UpperCamelCase_ )
lowerCamelCase__ : List[Any] = [el.to(UpperCamelCase_ ) for el in inputs["mask_labels"]]
lowerCamelCase__ : int = [el.to(UpperCamelCase_ ) for el in inputs["class_labels"]]
with torch.no_grad():
lowerCamelCase__ : Dict = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
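# Hedged sketch (not one of the tests above): turning the two tensors the tests
# check -- masks_queries_logits and class_queries_logits -- into a per-pixel
# semantic map with the image processor's post-processing helper. The checkpoint
# name and target size are assumptions; run manually, not under unittest.
def _semantic_map_sketch():
    image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
    inputs = image_processor(prepare_img(), return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # combines per-query class scores with per-query mask logits and resizes to the requested size
    semantic_map = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])[0]
    return semantic_map  # (height, width) tensor of predicted label ids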
| 713 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
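# Example invocation (hypothetical file name): opens the top five results in the default browser.
#   python crawl_google_results.py how to exit vim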
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
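# Hedged usage sketch for the exports above (the checkpoint name is an assumption):
#
#     from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
#     model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#     tokenizer.src_lang = "fr"
#     encoded = tokenizer("La vie est belle.", return_tensors="pt")
#     generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("en"))
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))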
| 714 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Dict = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
lowerCamelCase__ : str = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !
lowerCamelCase__ : Any = model(__lowerCamelCase )["last_hidden_state"]
lowerCamelCase__ : List[str] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice.
lowerCamelCase__ : str = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
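# Hedged sketch: producing the hard-coded ids above from raw text instead of
# pasting them (assumes the "camembert-base" tokenizer matches this checkpoint):
#
#     from transformers import CamembertTokenizer
#
#     tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#     input_ids = tokenizer("J'aime le camembert !", return_tensors="tf").input_ids
#     output = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")(input_ids)["last_hidden_state"]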
| 5 | 0 |